<div dir="ltr">In case you haven't seen it, this caused PR23369.</div><div class="gmail_extra"><br><div class="gmail_quote">On Mon, Apr 27, 2015 at 12:55 AM, Simon Pilgrim <span dir="ltr"><<a href="mailto:llvm-dev@redking.me.uk" target="_blank">llvm-dev@redking.me.uk</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Author: rksimon<br>
Date: Mon Apr 27 02:55:46 2015<br>
New Revision: 235837<br>
<br>
URL: <a href="http://llvm.org/viewvc/llvm-project?rev=235837&view=rev" target="_blank">http://llvm.org/viewvc/llvm-project?rev=235837&view=rev</a><br>
Log:<br>
[X86][SSE] Add v16i8/v32i8 multiplication support<br>
<br>
Patch to allow int8 vectors to be multiplied on the SSE unit instead of being scalarized.<br>
<br>
The patch sign extends the i8 lanes to i16, uses the SSE2 pmullw multiplication instruction, then packs the lower byte from each result.<br>
<br>
Differential Revision: <a href="http://reviews.llvm.org/D9115" target="_blank">http://reviews.llvm.org/D9115</a><br>
<br>
Modified:<br>
llvm/trunk/lib/Target/X86/X86ISelLowering.cpp<br>
llvm/trunk/test/CodeGen/X86/avx2-arith.ll<br>
llvm/trunk/test/CodeGen/X86/pmul.ll<br>
<br>
Modified: llvm/trunk/lib/Target/X86/X86ISelLowering.cpp<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=235837&r1=235836&r2=235837&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/X86/X86ISelLowering.cpp?rev=235837&r1=235836&r2=235837&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/lib/Target/X86/X86ISelLowering.cpp (original)<br>
+++ llvm/trunk/lib/Target/X86/X86ISelLowering.cpp Mon Apr 27 02:55:46 2015<br>
@@ -802,6 +802,7 @@ X86TargetLowering::X86TargetLowering(con<br>
setOperationAction(ISD::ADD, MVT::v8i16, Legal);<br>
setOperationAction(ISD::ADD, MVT::v4i32, Legal);<br>
setOperationAction(ISD::ADD, MVT::v2i64, Legal);<br>
+ setOperationAction(ISD::MUL, MVT::v16i8, Custom);<br>
setOperationAction(ISD::MUL, MVT::v4i32, Custom);<br>
setOperationAction(ISD::MUL, MVT::v2i64, Custom);<br>
setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);<br>
@@ -1122,7 +1123,7 @@ X86TargetLowering::X86TargetLowering(con<br>
setOperationAction(ISD::MUL, MVT::v4i64, Custom);<br>
setOperationAction(ISD::MUL, MVT::v8i32, Legal);<br>
setOperationAction(ISD::MUL, MVT::v16i16, Legal);<br>
- // Don't lower v32i8 because there is no 128-bit byte mul<br>
+ setOperationAction(ISD::MUL, MVT::v32i8, Custom);<br>
<br>
setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);<br>
setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);<br>
@@ -1171,7 +1172,7 @@ X86TargetLowering::X86TargetLowering(con<br>
setOperationAction(ISD::MUL, MVT::v4i64, Custom);<br>
setOperationAction(ISD::MUL, MVT::v8i32, Custom);<br>
setOperationAction(ISD::MUL, MVT::v16i16, Custom);<br>
- // Don't lower v32i8 because there is no 128-bit byte mul<br>
+ setOperationAction(ISD::MUL, MVT::v32i8, Custom);<br>
}<br>
<br>
// In the customized shift lowering, the legal cases in AVX2 will be<br>
@@ -9894,7 +9895,7 @@ static SDValue lower256BitVectorShuffle(<br>
int NumV2Elements = std::count_if(Mask.begin(), Mask.end(), [NumElts](int M) {<br>
return M >= NumElts;<br>
});<br>
-<br>
+<br>
if (NumV2Elements == 1 && Mask[0] >= NumElts)<br>
if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(<br>
DL, VT, V1, V2, Mask, Subtarget, DAG))<br>
@@ -10646,7 +10647,7 @@ SDValue X86TargetLowering::LowerINSERT_V<br>
return DAG.getNode(X86ISD::BLENDI, dl, VT, N0, N1Vec, N2);<br>
}<br>
}<br>
-<br>
+<br>
// Get the desired 128-bit vector chunk.<br>
SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);<br>
<br>
@@ -15908,6 +15909,79 @@ static SDValue LowerMUL(SDValue Op, cons<br>
SDValue A = Op.getOperand(0);<br>
SDValue B = Op.getOperand(1);<br>
<br>
+ // Lower v16i8/v32i8 mul as promotion to v8i16/v16i16 vector<br>
+ // pairs, multiply and truncate.<br>
+ if (VT == MVT::v16i8 || VT == MVT::v32i8) {<br>
+ if (Subtarget->hasInt256()) {<br>
+ if (VT == MVT::v32i8) {<br>
+ MVT SubVT = MVT::getVectorVT(MVT::i8, VT.getVectorNumElements() / 2);<br>
+ SDValue Lo = DAG.getIntPtrConstant(0);<br>
+ SDValue Hi = DAG.getIntPtrConstant(VT.getVectorNumElements() / 2);<br>
+ SDValue ALo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Lo);<br>
+ SDValue BLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Lo);<br>
+ SDValue AHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, A, Hi);<br>
+ SDValue BHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, SubVT, B, Hi);<br>
+ return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,<br>
+ DAG.getNode(ISD::MUL, dl, SubVT, ALo, BLo),<br>
+ DAG.getNode(ISD::MUL, dl, SubVT, AHi, BHi));<br>
+ }<br>
+<br>
+ MVT ExVT = MVT::getVectorVT(MVT::i16, VT.getVectorNumElements());<br>
+ return DAG.getNode(<br>
+ ISD::TRUNCATE, dl, VT,<br>
+ DAG.getNode(ISD::MUL, dl, ExVT,<br>
+ DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, A),<br>
+ DAG.getNode(ISD::SIGN_EXTEND, dl, ExVT, B)));<br>
+ }<br>
+<br>
+ assert(VT == MVT::v16i8 &&<br>
+ "Pre-AVX2 support only supports v16i8 multiplication");<br>
+ MVT ExVT = MVT::v8i16;<br>
+<br>
+ // Extract the lo parts and sign extend to i16<br>
+ SDValue ALo, BLo;<br>
+ if (Subtarget->hasSSE41()) {<br>
+ ALo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, A);<br>
+ BLo = DAG.getNode(X86ISD::VSEXT, dl, ExVT, B);<br>
+ } else {<br>
+ const int ShufMask[] = {0, -1, 1, -1, 2, -1, 3, -1,<br>
+ 4, -1, 5, -1, 6, -1, 7, -1};<br>
+ ALo = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);<br>
+ BLo = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);<br>
+ ALo = DAG.getNode(ISD::BITCAST, dl, ExVT, ALo);<br>
+ BLo = DAG.getNode(ISD::BITCAST, dl, ExVT, BLo);<br>
+ ALo = DAG.getNode(ISD::SRA, dl, ExVT, ALo, DAG.getConstant(8, ExVT));<br>
+ BLo = DAG.getNode(ISD::SRA, dl, ExVT, BLo, DAG.getConstant(8, ExVT));<br>
+ }<br>
+<br>
+ // Extract the hi parts and sign extend to i16<br>
+ SDValue AHi, BHi;<br>
+ if (Subtarget->hasSSE41()) {<br>
+ const int ShufMask[] = {8, 9, 10, 11, 12, 13, 14, 15,<br>
+ -1, -1, -1, -1, -1, -1, -1, -1};<br>
+ AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);<br>
+ BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);<br>
+ AHi = DAG.getNode(X86ISD::VSEXT, dl, ExVT, AHi);<br>
+ BHi = DAG.getNode(X86ISD::VSEXT, dl, ExVT, BHi);<br>
+ } else {<br>
+ const int ShufMask[] = {8, -1, 9, -1, 10, -1, 11, -1,<br>
+ 12, -1, 13, -1, 14, -1, 15, -1};<br>
+ AHi = DAG.getVectorShuffle(VT, dl, A, A, ShufMask);<br>
+ BHi = DAG.getVectorShuffle(VT, dl, B, B, ShufMask);<br>
+ AHi = DAG.getNode(ISD::BITCAST, dl, ExVT, AHi);<br>
+ BHi = DAG.getNode(ISD::BITCAST, dl, ExVT, BHi);<br>
+ AHi = DAG.getNode(ISD::SRA, dl, ExVT, AHi, DAG.getConstant(8, ExVT));<br>
+ BHi = DAG.getNode(ISD::SRA, dl, ExVT, BHi, DAG.getConstant(8, ExVT));<br>
+ }<br>
+<br>
+ // Multiply, mask the lower 8bits of the lo/hi results and pack<br>
+ SDValue RLo = DAG.getNode(ISD::MUL, dl, ExVT, ALo, BLo);<br>
+ SDValue RHi = DAG.getNode(ISD::MUL, dl, ExVT, AHi, BHi);<br>
+ RLo = DAG.getNode(ISD::AND, dl, ExVT, RLo, DAG.getConstant(255, ExVT));<br>
+ RHi = DAG.getNode(ISD::AND, dl, ExVT, RHi, DAG.getConstant(255, ExVT));<br>
+ return DAG.getNode(X86ISD::PACKUS, dl, VT, RLo, RHi);<br>
+ }<br>
+<br>
// Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.<br>
if (VT == MVT::v4i32) {<br>
assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&<br>
<br>
Modified: llvm/trunk/test/CodeGen/X86/avx2-arith.ll<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-arith.ll?rev=235837&r1=235836&r2=235837&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx2-arith.ll?rev=235837&r1=235836&r2=235837&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/test/CodeGen/X86/avx2-arith.ll (original)<br>
+++ llvm/trunk/test/CodeGen/X86/avx2-arith.ll Mon Apr 27 02:55:46 2015<br>
@@ -60,6 +60,49 @@ define <16 x i16> @test_vpmullw(<16 x i1<br>
ret <16 x i16> %x<br>
}<br>
<br>
+; CHECK: mul-v16i8<br>
+; CHECK: # BB#0:<br>
+; CHECK-NEXT: vpmovsxbw %xmm1, %ymm1<br>
+; CHECK-NEXT: vpmovsxbw %xmm0, %ymm0<br>
+; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm0<br>
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1<br>
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u><br>
+; CHECK-NEXT: vpshufb %xmm2, %xmm1, %xmm1<br>
+; CHECK-NEXT: vpshufb %xmm2, %xmm0, %xmm0<br>
+; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]<br>
+; CHECK-NEXT: vzeroupper<br>
+; CHECK-NEXT: retq<br>
+define <16 x i8> @mul-v16i8(<16 x i8> %i, <16 x i8> %j) nounwind readnone {<br>
+ %x = mul <16 x i8> %i, %j<br>
+ ret <16 x i8> %x<br>
+}<br>
+<br>
+; CHECK: mul-v32i8<br>
+; CHECK: # BB#0:<br>
+; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2<br>
+; CHECK-NEXT: vpmovsxbw %xmm2, %ymm2<br>
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm3<br>
+; CHECK-NEXT: vpmovsxbw %xmm3, %ymm3<br>
+; CHECK-NEXT: vpmullw %ymm2, %ymm3, %ymm2<br>
+; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm3<br>
+; CHECK-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u><br>
+; CHECK-NEXT: vpshufb %xmm4, %xmm3, %xmm3<br>
+; CHECK-NEXT: vpshufb %xmm4, %xmm2, %xmm2<br>
+; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]<br>
+; CHECK-NEXT: vpmovsxbw %xmm1, %ymm1<br>
+; CHECK-NEXT: vpmovsxbw %xmm0, %ymm0<br>
+; CHECK-NEXT: vpmullw %ymm1, %ymm0, %ymm0<br>
+; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1<br>
+; CHECK-NEXT: vpshufb %xmm4, %xmm1, %xmm1<br>
+; CHECK-NEXT: vpshufb %xmm4, %xmm0, %xmm0<br>
+; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]<br>
+; CHECK-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0<br>
+; CHECK-NEXT: retq<br>
+define <32 x i8> @mul-v32i8(<32 x i8> %i, <32 x i8> %j) nounwind readnone {<br>
+ %x = mul <32 x i8> %i, %j<br>
+ ret <32 x i8> %x<br>
+}<br>
+<br>
; CHECK: mul-v4i64<br>
; CHECK: vpmuludq %ymm<br>
; CHECK-NEXT: vpsrlq $32, %ymm<br>
<br>
Modified: llvm/trunk/test/CodeGen/X86/pmul.ll<br>
URL: <a href="http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmul.ll?rev=235837&r1=235836&r2=235837&view=diff" target="_blank">http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pmul.ll?rev=235837&r1=235836&r2=235837&view=diff</a><br>
==============================================================================<br>
--- llvm/trunk/test/CodeGen/X86/pmul.ll (original)<br>
+++ llvm/trunk/test/CodeGen/X86/pmul.ll Mon Apr 27 02:55:46 2015<br>
@@ -1,6 +1,53 @@<br>
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=ALL --check-prefix=SSE2<br>
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE41<br>
<br>
+define <16 x i8> @mul8c(<16 x i8> %i) nounwind {<br>
+; SSE2-LABEL: mul8c:<br>
+; SSE2: # BB#0: # %entry<br>
+; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [117,117,117,117,117,117,117,117,117,117,117,117,117,117,117,117]<br>
+; SSE2-NEXT: psraw $8, %xmm1<br>
+; SSE2-NEXT: movdqa %xmm0, %xmm2<br>
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]<br>
+; SSE2-NEXT: psraw $8, %xmm2<br>
+; SSE2-NEXT: pmullw %xmm1, %xmm2<br>
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]<br>
+; SSE2-NEXT: pand %xmm3, %xmm2<br>
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]<br>
+; SSE2-NEXT: psraw $8, %xmm0<br>
+; SSE2-NEXT: pmullw %xmm1, %xmm0<br>
+; SSE2-NEXT: pand %xmm3, %xmm0<br>
+; SSE2-NEXT: packuswb %xmm2, %xmm0<br>
+; SSE2-NEXT: retq<br>
+;<br>
+; SSE41-LABEL: mul8c:<br>
+; SSE41: # BB#0: # %entry<br>
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm1<br>
+; SSE41-NEXT: pmovsxbw {{.*}}(%rip), %xmm2<br>
+; SSE41-NEXT: pmullw %xmm2, %xmm1<br>
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]<br>
+; SSE41-NEXT: pand %xmm3, %xmm1<br>
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]<br>
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0<br>
+; SSE41-NEXT: pmullw %xmm2, %xmm0<br>
+; SSE41-NEXT: pand %xmm3, %xmm0<br>
+; SSE41-NEXT: packuswb %xmm0, %xmm1<br>
+; SSE41-NEXT: movdqa %xmm1, %xmm0<br>
+; SSE41-NEXT: retq<br>
+entry:<br>
+ %A = mul <16 x i8> %i, < i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117, i8 117 ><br>
+ ret <16 x i8> %A<br>
+}<br>
+<br>
+define <8 x i16> @mul16c(<8 x i16> %i) nounwind {<br>
+; ALL-LABEL: mul16c:<br>
+; ALL: # BB#0: # %entry<br>
+; ALL-NEXT: pmullw {{.*}}(%rip), %xmm0<br>
+; ALL-NEXT: retq<br>
+entry:<br>
+ %A = mul <8 x i16> %i, < i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117, i16 117 ><br>
+ ret <8 x i16> %A<br>
+}<br>
+<br>
define <4 x i32> @a(<4 x i32> %i) nounwind {<br>
; SSE2-LABEL: a:<br>
; SSE2: # BB#0: # %entry<br>
@@ -42,6 +89,59 @@ entry:<br>
ret <2 x i64> %A<br>
}<br>
<br>
+define <16 x i8> @mul8(<16 x i8> %i, <16 x i8> %j) nounwind {<br>
+; SSE2-LABEL: mul8:<br>
+; SSE2: # BB#0: # %entry<br>
+; SSE2-NEXT: movdqa %xmm1, %xmm3<br>
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]<br>
+; SSE2-NEXT: psraw $8, %xmm3<br>
+; SSE2-NEXT: movdqa %xmm0, %xmm2<br>
+; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]<br>
+; SSE2-NEXT: psraw $8, %xmm2<br>
+; SSE2-NEXT: pmullw %xmm3, %xmm2<br>
+; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]<br>
+; SSE2-NEXT: pand %xmm3, %xmm2<br>
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]<br>
+; SSE2-NEXT: psraw $8, %xmm1<br>
+; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]<br>
+; SSE2-NEXT: psraw $8, %xmm0<br>
+; SSE2-NEXT: pmullw %xmm1, %xmm0<br>
+; SSE2-NEXT: pand %xmm3, %xmm0<br>
+; SSE2-NEXT: packuswb %xmm0, %xmm2<br>
+; SSE2-NEXT: movdqa %xmm2, %xmm0<br>
+; SSE2-NEXT: retq<br>
+;<br>
+; SSE41-LABEL: mul8:<br>
+; SSE41: # BB#0: # %entry<br>
+; SSE41-NEXT: pmovsxbw %xmm1, %xmm3<br>
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm2<br>
+; SSE41-NEXT: pmullw %xmm3, %xmm2<br>
+; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]<br>
+; SSE41-NEXT: pand %xmm3, %xmm2<br>
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]<br>
+; SSE41-NEXT: pmovsxbw %xmm1, %xmm1<br>
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]<br>
+; SSE41-NEXT: pmovsxbw %xmm0, %xmm0<br>
+; SSE41-NEXT: pmullw %xmm1, %xmm0<br>
+; SSE41-NEXT: pand %xmm3, %xmm0<br>
+; SSE41-NEXT: packuswb %xmm0, %xmm2<br>
+; SSE41-NEXT: movdqa %xmm2, %xmm0<br>
+; SSE41-NEXT: retq<br>
+entry:<br>
+ %A = mul <16 x i8> %i, %j<br>
+ ret <16 x i8> %A<br>
+}<br>
+<br>
+define <8 x i16> @mul16(<8 x i16> %i, <8 x i16> %j) nounwind {<br>
+; ALL-LABEL: mul16:<br>
+; ALL: # BB#0: # %entry<br>
+; ALL-NEXT: pmullw %xmm1, %xmm0<br>
+; ALL-NEXT: retq<br>
+entry:<br>
+ %A = mul <8 x i16> %i, %j<br>
+ ret <8 x i16> %A<br>
+}<br>
+<br>
define <4 x i32> @c(<4 x i32> %i, <4 x i32> %j) nounwind {<br>
; SSE2-LABEL: c:<br>
; SSE2: # BB#0: # %entry<br>
<br>
<br>
_______________________________________________<br>
llvm-commits mailing list<br>
<a href="mailto:llvm-commits@cs.uiuc.edu">llvm-commits@cs.uiuc.edu</a><br>
<a href="http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits" target="_blank">http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits</a><br>
</blockquote></div><br></div>