[llvm] r328601 - [x86] add RUN for target before roundss; NFC

Sanjay Patel via llvm-commits <llvm-commits at lists.llvm.org>
Mon Mar 26 17:32:19 PDT 2018


Author: spatel
Date: Mon Mar 26 17:32:19 2018
New Revision: 328601

URL: http://llvm.org/viewvc/llvm-project?rev=328601&view=rev
Log:
[x86] add RUN for target before roundss; NFC
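
The new SSE2 run exercises a target that predates SSE4.1, so there is no
roundss/roundsd and the fptoui cases must be expanded by hand: cvttsd2si
only produces a signed i64. The SSE2 output below for trunc_unsigned_f64
is the usual compare-and-bias expansion; a minimal C sketch of the same
idea (the helper name fptoui_f64 is illustrative, not part of the test):

    #include <stdint.h>

    /* Branchless fptoui f64 -> u64, mirroring the SSE2 asm below.
       (Plain C calls the out-of-range casts UB; this shows what the
       hardware sequence computes, not portable C.) */
    static uint64_t fptoui_f64(double x) {
        const double two63 = 9223372036854775808.0;      /* 2^63 */
        uint64_t small = (uint64_t)(int64_t)x;           /* ok when x < 2^63 */
        uint64_t large = (uint64_t)(int64_t)(x - two63)  /* subsd + cvttsd2si */
                       ^ 0x8000000000000000ULL;          /* xorq: restore bit 63 */
        return x >= two63 ? large : small;               /* ucomisd + cmovaeq */
    }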

Modified:
    llvm/trunk/test/CodeGen/X86/ftrunc.ll

Modified: llvm/trunk/test/CodeGen/X86/ftrunc.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/ftrunc.ll?rev=328601&r1=328600&r2=328601&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/ftrunc.ll (original)
+++ llvm/trunk/test/CodeGen/X86/ftrunc.ll Mon Mar 26 17:32:19 2018
@@ -1,8 +1,17 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2    | FileCheck %s --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1  | FileCheck %s --check-prefix=SSE41
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx     | FileCheck %s --check-prefix=AVX1
 
 define float @trunc_unsigned_f32(float %x) nounwind {
+; SSE2-LABEL: trunc_unsigned_f32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttss2si %xmm0, %rax
+; SSE2-NEXT:    movl %eax, %eax
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    cvtsi2ssq %rax, %xmm0
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_unsigned_f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    cvttss2si %xmm0, %rax
@@ -17,12 +26,30 @@ define float @trunc_unsigned_f32(float %
 ; AVX1-NEXT:    movl %eax, %eax
 ; AVX1-NEXT:    vcvtsi2ssq %rax, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
-	%i = fptoui float %x to i32
-	%r = uitofp i32 %i to float
-	ret float %r
+  %i = fptoui float %x to i32
+  %r = uitofp i32 %i to float
+  ret float %r
 }
 
 define double @trunc_unsigned_f64(double %x) nounwind {
+; SSE2-LABEL: trunc_unsigned_f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
+; SSE2-NEXT:    movapd %xmm0, %xmm2
+; SSE2-NEXT:    subsd %xmm1, %xmm2
+; SSE2-NEXT:    cvttsd2si %xmm2, %rax
+; SSE2-NEXT:    movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
+; SSE2-NEXT:    xorq %rax, %rcx
+; SSE2-NEXT:    cvttsd2si %xmm0, %rax
+; SSE2-NEXT:    ucomisd %xmm1, %xmm0
+; SSE2-NEXT:    cmovaeq %rcx, %rax
+; SSE2-NEXT:    movq %rax, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
+; SSE2-NEXT:    subpd {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT:    addpd %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_unsigned_f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movsd {{.*#+}} xmm1 = mem[0],zero
@@ -55,12 +82,40 @@ define double @trunc_unsigned_f64(double
 ; AVX1-NEXT:    vsubpd {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
-	%i = fptoui double %x to i64
-	%r = uitofp i64 %i to double
-	ret double %r
+  %i = fptoui double %x to i64
+  %r = uitofp i64 %i to double
+  ret double %r
 }
 
 define <4 x float> @trunc_unsigned_v4f32(<4 x float> %x) nounwind {
+; SSE2-LABEL: trunc_unsigned_v4f32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1],xmm0[2,3]
+; SSE2-NEXT:    cvttss2si %xmm1, %rax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    movaps %xmm0, %xmm2
+; SSE2-NEXT:    movhlps {{.*#+}} xmm2 = xmm0[1],xmm2[1]
+; SSE2-NEXT:    cvttss2si %xmm2, %rax
+; SSE2-NEXT:    movd %eax, %xmm2
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; SSE2-NEXT:    cvttss2si %xmm0, %rax
+; SSE2-NEXT:    movd %eax, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,2,3]
+; SSE2-NEXT:    cvttss2si %xmm0, %rax
+; SSE2-NEXT:    movd %eax, %xmm0
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT:    movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535]
+; SSE2-NEXT:    pand %xmm1, %xmm0
+; SSE2-NEXT:    por {{.*}}(%rip), %xmm0
+; SSE2-NEXT:    psrld $16, %xmm1
+; SSE2-NEXT:    por {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    addps {{.*}}(%rip), %xmm1
+; SSE2-NEXT:    addps %xmm0, %xmm1
+; SSE2-NEXT:    movaps %xmm1, %xmm0
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_unsigned_v4f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
@@ -103,12 +158,47 @@ define <4 x float> @trunc_unsigned_v4f32
 ; AVX1-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
 ; AVX1-NEXT:    vaddps %xmm0, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
-	%i = fptoui <4 x float> %x to <4 x i32>
-	%r = uitofp <4 x i32> %i to <4 x float>
-	ret <4 x float> %r
+  %i = fptoui <4 x float> %x to <4 x i32>
+  %r = uitofp <4 x i32> %i to <4 x float>
+  ret <4 x float> %r
 }
 
 define <2 x double> @trunc_unsigned_v2f64(<2 x double> %x) nounwind {
+; SSE2-LABEL: trunc_unsigned_v2f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; SSE2-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE2-NEXT:    movaps %xmm1, %xmm3
+; SSE2-NEXT:    subsd %xmm2, %xmm3
+; SSE2-NEXT:    cvttsd2si %xmm3, %rax
+; SSE2-NEXT:    movabsq $-9223372036854775808, %rcx # imm = 0x8000000000000000
+; SSE2-NEXT:    xorq %rcx, %rax
+; SSE2-NEXT:    cvttsd2si %xmm1, %rdx
+; SSE2-NEXT:    ucomisd %xmm2, %xmm1
+; SSE2-NEXT:    cmovaeq %rax, %rdx
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    subsd %xmm2, %xmm1
+; SSE2-NEXT:    cvttsd2si %xmm1, %rax
+; SSE2-NEXT:    xorq %rcx, %rax
+; SSE2-NEXT:    cvttsd2si %xmm0, %rcx
+; SSE2-NEXT:    ucomisd %xmm2, %xmm0
+; SSE2-NEXT:    cmovaeq %rax, %rcx
+; SSE2-NEXT:    movq %rcx, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
+; SSE2-NEXT:    subpd %xmm3, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT:    addpd %xmm1, %xmm0
+; SSE2-NEXT:    movq %rdx, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    subpd %xmm3, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
+; SSE2-NEXT:    addpd %xmm1, %xmm2
+; SSE2-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm2[0]
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_unsigned_v2f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movaps %xmm0, %xmm1
@@ -167,12 +257,74 @@ define <2 x double> @trunc_unsigned_v2f6
 ; AVX1-NEXT:    vsubpd %xmm2, %xmm1, %xmm1
 ; AVX1-NEXT:    vhaddpd %xmm1, %xmm0, %xmm0
 ; AVX1-NEXT:    retq
-	%i = fptoui <2 x double> %x to <2 x i64>
-	%r = uitofp <2 x i64> %i to <2 x double>
-	ret <2 x double> %r
+  %i = fptoui <2 x double> %x to <2 x i64>
+  %r = uitofp <2 x i64> %i to <2 x double>
+  ret <2 x double> %r
 }
 
 define <4 x double> @trunc_unsigned_v4f64(<4 x double> %x) nounwind {
+; SSE2-LABEL: trunc_unsigned_v4f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    movaps %xmm1, %xmm3
+; SSE2-NEXT:    movhlps {{.*#+}} xmm3 = xmm1[1],xmm3[1]
+; SSE2-NEXT:    movsd {{.*#+}} xmm2 = mem[0],zero
+; SSE2-NEXT:    movaps %xmm3, %xmm4
+; SSE2-NEXT:    subsd %xmm2, %xmm4
+; SSE2-NEXT:    cvttsd2si %xmm4, %rcx
+; SSE2-NEXT:    movabsq $-9223372036854775808, %rdx # imm = 0x8000000000000000
+; SSE2-NEXT:    xorq %rdx, %rcx
+; SSE2-NEXT:    cvttsd2si %xmm3, %rax
+; SSE2-NEXT:    ucomisd %xmm2, %xmm3
+; SSE2-NEXT:    cmovaeq %rcx, %rax
+; SSE2-NEXT:    movaps %xmm1, %xmm3
+; SSE2-NEXT:    subsd %xmm2, %xmm3
+; SSE2-NEXT:    cvttsd2si %xmm3, %rsi
+; SSE2-NEXT:    xorq %rdx, %rsi
+; SSE2-NEXT:    cvttsd2si %xmm1, %rcx
+; SSE2-NEXT:    ucomisd %xmm2, %xmm1
+; SSE2-NEXT:    cmovaeq %rsi, %rcx
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
+; SSE2-NEXT:    movaps %xmm1, %xmm3
+; SSE2-NEXT:    subsd %xmm2, %xmm3
+; SSE2-NEXT:    cvttsd2si %xmm3, %rsi
+; SSE2-NEXT:    xorq %rdx, %rsi
+; SSE2-NEXT:    cvttsd2si %xmm1, %rdi
+; SSE2-NEXT:    ucomisd %xmm2, %xmm1
+; SSE2-NEXT:    cmovaeq %rsi, %rdi
+; SSE2-NEXT:    movaps %xmm0, %xmm1
+; SSE2-NEXT:    subsd %xmm2, %xmm1
+; SSE2-NEXT:    cvttsd2si %xmm1, %rsi
+; SSE2-NEXT:    xorq %rdx, %rsi
+; SSE2-NEXT:    cvttsd2si %xmm0, %rdx
+; SSE2-NEXT:    ucomisd %xmm2, %xmm0
+; SSE2-NEXT:    cmovaeq %rsi, %rdx
+; SSE2-NEXT:    movq %rdx, %xmm1
+; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [1127219200,1160773632,0,0]
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    movapd {{.*#+}} xmm3 = [4.503600e+15,1.934281e+25]
+; SSE2-NEXT:    subpd %xmm3, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
+; SSE2-NEXT:    addpd %xmm1, %xmm0
+; SSE2-NEXT:    movq %rdi, %xmm1
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
+; SSE2-NEXT:    subpd %xmm3, %xmm1
+; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm1[2,3,0,1]
+; SSE2-NEXT:    addpd %xmm1, %xmm4
+; SSE2-NEXT:    unpcklpd {{.*#+}} xmm0 = xmm0[0],xmm4[0]
+; SSE2-NEXT:    movq %rcx, %xmm4
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT:    subpd %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm4[2,3,0,1]
+; SSE2-NEXT:    addpd %xmm4, %xmm1
+; SSE2-NEXT:    movq %rax, %xmm4
+; SSE2-NEXT:    punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE2-NEXT:    subpd %xmm3, %xmm4
+; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm4[2,3,0,1]
+; SSE2-NEXT:    addpd %xmm4, %xmm2
+; SSE2-NEXT:    unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_unsigned_v4f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    movaps %xmm1, %xmm3
@@ -276,12 +428,19 @@ define <4 x double> @trunc_unsigned_v4f6
 ; AVX1-NEXT:    vhaddpd %xmm1, %xmm3, %xmm1
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
-	%i = fptoui <4 x double> %x to <4 x i64>
-	%r = uitofp <4 x i64> %i to <4 x double>
-	ret <4 x double> %r
+  %i = fptoui <4 x double> %x to <4 x i64>
+  %r = uitofp <4 x i64> %i to <4 x double>
+  ret <4 x double> %r
 }
 
 define float @trunc_signed_f32(float %x) nounwind {
+; SSE2-LABEL: trunc_signed_f32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttss2si %xmm0, %eax
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    cvtsi2ssl %eax, %xmm0
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_signed_f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    cvttss2si %xmm0, %eax
@@ -294,12 +453,19 @@ define float @trunc_signed_f32(float %x)
 ; AVX1-NEXT:    vcvttss2si %xmm0, %eax
 ; AVX1-NEXT:    vcvtsi2ssl %eax, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
-	%i = fptosi float %x to i32
-	%r = sitofp i32 %i to float
-	ret float %r
+  %i = fptosi float %x to i32
+  %r = sitofp i32 %i to float
+  ret float %r
 }
 
 define double @trunc_signed_f64(double %x) nounwind {
+; SSE2-LABEL: trunc_signed_f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttsd2si %xmm0, %rax
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_signed_f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    cvttsd2si %xmm0, %rax
@@ -312,12 +478,18 @@ define double @trunc_signed_f64(double %
 ; AVX1-NEXT:    vcvttsd2si %xmm0, %rax
 ; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm1, %xmm0
 ; AVX1-NEXT:    retq
-	%i = fptosi double %x to i64
-	%r = sitofp i64 %i to double
-	ret double %r
+  %i = fptosi double %x to i64
+  %r = sitofp i64 %i to double
+  ret double %r
 }
 
 define <4 x float> @trunc_signed_v4f32(<4 x float> %x) nounwind {
+; SSE2-LABEL: trunc_signed_v4f32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttps2dq %xmm0, %xmm0
+; SSE2-NEXT:    cvtdq2ps %xmm0, %xmm0
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_signed_v4f32:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    cvttps2dq %xmm0, %xmm0
@@ -329,12 +501,23 @@ define <4 x float> @trunc_signed_v4f32(<
 ; AVX1-NEXT:    vcvttps2dq %xmm0, %xmm0
 ; AVX1-NEXT:    vcvtdq2ps %xmm0, %xmm0
 ; AVX1-NEXT:    retq
-	%i = fptosi <4 x float> %x to <4 x i32>
-	%r = sitofp <4 x i32> %i to <4 x float>
-	ret <4 x float> %r
+  %i = fptosi <4 x float> %x to <4 x i32>
+  %r = sitofp <4 x i32> %i to <4 x float>
+  ret <4 x float> %r
 }
 
 define <2 x double> @trunc_signed_v2f64(<2 x double> %x) nounwind {
+; SSE2-LABEL: trunc_signed_v2f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttsd2si %xmm0, %rax
+; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE2-NEXT:    cvttsd2si %xmm0, %rcx
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    cvtsi2sdq %rax, %xmm0
+; SSE2-NEXT:    cvtsi2sdq %rcx, %xmm1
+; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_signed_v2f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    cvttsd2si %xmm0, %rax
@@ -355,12 +538,31 @@ define <2 x double> @trunc_signed_v2f64(
 ; AVX1-NEXT:    vcvtsi2sdq %rax, %xmm2, %xmm1
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
 ; AVX1-NEXT:    retq
-	%i = fptosi <2 x double> %x to <2 x i64>
-	%r = sitofp <2 x i64> %i to <2 x double>
-	ret <2 x double> %r
+  %i = fptosi <2 x double> %x to <2 x i64>
+  %r = sitofp <2 x i64> %i to <2 x double>
+  ret <2 x double> %r
 }
 
 define <4 x double> @trunc_signed_v4f64(<4 x double> %x) nounwind {
+; SSE2-LABEL: trunc_signed_v4f64:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    cvttsd2si %xmm1, %rax
+; SSE2-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
+; SSE2-NEXT:    cvttsd2si %xmm1, %rcx
+; SSE2-NEXT:    cvttsd2si %xmm0, %rdx
+; SSE2-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
+; SSE2-NEXT:    cvttsd2si %xmm0, %rsi
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    cvtsi2sdq %rdx, %xmm0
+; SSE2-NEXT:    xorps %xmm1, %xmm1
+; SSE2-NEXT:    cvtsi2sdq %rsi, %xmm1
+; SSE2-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE2-NEXT:    xorps %xmm1, %xmm1
+; SSE2-NEXT:    cvtsi2sdq %rax, %xmm1
+; SSE2-NEXT:    cvtsi2sdq %rcx, %xmm2
+; SSE2-NEXT:    movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT:    retq
+;
 ; SSE41-LABEL: trunc_signed_v4f64:
 ; SSE41:       # %bb.0:
 ; SSE41-NEXT:    cvttsd2si %xmm1, %rax
@@ -397,8 +599,8 @@ define <4 x double> @trunc_signed_v4f64(
 ; AVX1-NEXT:    vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
-	%i = fptosi <4 x double> %x to <4 x i64>
-	%r = sitofp <4 x i64> %i to <4 x double>
-	ret <4 x double> %r
+  %i = fptosi <4 x double> %x to <4 x i64>
+  %r = sitofp <4 x i64> %i to <4 x double>
+  ret <4 x double> %r
 }
 
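A smaller trick also shows up in the new SSE2 checks above. For scalar
f32 -> u32 (trunc_unsigned_f32) no compare-and-bias expansion is needed:
every u32 fits in the non-negative range of i64, so one signed cvttss2si
into a 64-bit register plus a truncating movl is enough. A sketch
(helper name illustrative):

    #include <stdint.h>

    /* fptoui f32 -> u32 by widening, as in trunc_unsigned_f32 above:
       cvttss2si %xmm0, %rax followed by movl %eax, %eax. */
    static uint32_t fptoui_f32(float x) {
        return (uint32_t)(int64_t)x;
    }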
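The uitofp direction in the u64 cases uses the classic magic-constant
sequence: plant each 32-bit half of the integer into the mantissa of a
double with a fixed power-of-two exponent (2^52 for the low half and
2^84 for the high half; these are the [1127219200,1160773632,0,0] and
[4.503600e+15,1.934281e+25] constants in the checks above), subtract the
exponents back out, and add the two exact halves. A scalar C sketch
(again only illustrative):

    #include <stdint.h>
    #include <string.h>

    /* uitofp u64 -> f64 via mantissa insertion, as in the punpckldq /
       subpd / addpd sequence above. */
    static double uitofp_u64(uint64_t x) {
        uint64_t lo_bits = 0x4330000000000000ULL | (x & 0xFFFFFFFFULL);
        uint64_t hi_bits = 0x4530000000000000ULL | (x >> 32);
        double lo, hi;
        memcpy(&lo, &lo_bits, sizeof lo);  /* lo == 2^52 + low_half */
        memcpy(&hi, &hi_bits, sizeof hi);  /* hi == 2^84 + high_half * 2^32 */
        return (hi - 0x1p84) + (lo - 0x1p52);  /* single rounding in the add */
    }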