[llvm-branch-commits] [llvm-branch] r369362 - Merging r367412 and r367429:
Hans Wennborg via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Aug 20 03:10:05 PDT 2019
Author: hans
Date: Tue Aug 20 03:10:05 2019
New Revision: 369362
URL: http://llvm.org/viewvc/llvm-project?rev=369362&view=rev
Log:
Merging r367412 and r367429:
------------------------------------------------------------------------
r367412 | rksimon | 2019-07-31 13:35:01 +0200 (Wed, 31 Jul 2019) | 1 line
[X86][AVX] Add reduced test case for PR42833
------------------------------------------------------------------------
------------------------------------------------------------------------
r367429 | rksimon | 2019-07-31 14:55:39 +0200 (Wed, 31 Jul 2019) | 3 lines
[X86][AVX] Ensure chained subvector insertions are the same size (PR42833)
Before combining insert_subvector(insert_subvector(vec, sub0, c0), sub1, c1) patterns, ensure that the subvectors are all the same type. Especially on AVX512 targets, we might have a mixture of 128-bit and 256-bit subvector insertions.
------------------------------------------------------------------------
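[Editor's note: to illustrate the fold being guarded here, the combines below
treat insert_subvector(insert_subvector(vec, sub0, c0), sub1, c1) as a plain
concatenation of sub0 and sub1, which is only valid when both subvectors have
the same width. A minimal sketch of that check follows; the helper name
isSameSizeSubvectorConcat is hypothetical, while SDValue,
ISD::INSERT_SUBVECTOR and isNullConstant are the real LLVM APIs used in the
diff below.

#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
using namespace llvm;

// Hypothetical helper (illustration only, not in the tree): returns true if
// a two-level insert_subvector chain is a plain concatenation of two equally
// sized subvectors, i.e.
//   insert_subvector(insert_subvector(vec, Sub0, 0), Sub1, c1)
static bool isSameSizeSubvectorConcat(SDValue N) {
  if (N.getOpcode() != ISD::INSERT_SUBVECTOR)
    return false;
  SDValue Src = N.getOperand(0);  // inner insert_subvector(vec, Sub0, 0)
  SDValue Sub1 = N.getOperand(1); // outer inserted subvector
  if (Src.getOpcode() != ISD::INSERT_SUBVECTOR ||
      !isNullConstant(Src.getOperand(2)))
    return false;
  SDValue Sub0 = Src.getOperand(1);
  // The PR42833 fix: on AVX512 the chain may mix 128-bit and 256-bit
  // subvectors (e.g. a v4i32 insert followed by a v8i32 insert into a
  // v16i32), so only fold when the inserted subvectors match in type.
  return Sub0.getValueType() == Sub1.getValueType();
}
]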
Modified:
llvm/branches/release_90/ (props changed)
llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp
llvm/branches/release_90/test/CodeGen/X86/oddsubvector.ll
Propchange: llvm/branches/release_90/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 20 03:10:05 2019
@@ -1,3 +1,3 @@
/llvm/branches/Apple/Pertwee:110850,110961
/llvm/branches/type-system-rewrite:133420-134817
-/llvm/trunk:155241,366431,366481,366487,366527,366570,366660,366868,366925,367019,367030,367062,367084,367124,367215,367292,367304,367306,367314,367340-367341,367394,367396,367398,367403,367417,367662,367750,367753,367846-367847,367898,367941,368004,368230,368300,368315,368324,368477-368478,368517-368519,368554,368572,368873,369011,369026,369084,369097,369168,369199
+/llvm/trunk:155241,366431,366481,366487,366527,366570,366660,366868,366925,367019,367030,367062,367084,367124,367215,367292,367304,367306,367314,367340-367341,367394,367396,367398,367403,367412,367417,367429,367662,367750,367753,367846-367847,367898,367941,368004,368230,368300,368315,368324,368477-368478,368517-368519,368554,368572,368873,369011,369026,369084,369097,369168,369199
Modified: llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp?rev=369362&r1=369361&r2=369362&view=diff
==============================================================================
--- llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp (original)
+++ llvm/branches/release_90/lib/Target/X86/X86ISelLowering.cpp Tue Aug 20 03:10:05 2019
@@ -5505,6 +5505,7 @@ static bool collectConcatOps(SDNode *N,
if (VT.getSizeInBits() == (SubVT.getSizeInBits() * 2) &&
Idx == (VT.getVectorNumElements() / 2) &&
Src.getOpcode() == ISD::INSERT_SUBVECTOR &&
+ Src.getOperand(1).getValueType() == SubVT &&
isNullConstant(Src.getOperand(2))) {
Ops.push_back(Src.getOperand(1));
Ops.push_back(Sub);
@@ -43840,6 +43841,7 @@ static SDValue combineInsertSubvector(SD
Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
OpVT.getSizeInBits() == SubVecVT.getSizeInBits() * 2 &&
isNullConstant(Vec.getOperand(2)) && !Vec.getOperand(0).isUndef() &&
+ Vec.getOperand(1).getValueSizeInBits() == SubVecVT.getSizeInBits() &&
Vec.hasOneUse()) {
Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, OpVT, DAG.getUNDEF(OpVT),
Vec.getOperand(1), Vec.getOperand(2));
Modified: llvm/branches/release_90/test/CodeGen/X86/oddsubvector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_90/test/CodeGen/X86/oddsubvector.ll?rev=369362&r1=369361&r2=369362&view=diff
==============================================================================
--- llvm/branches/release_90/test/CodeGen/X86/oddsubvector.ll (original)
+++ llvm/branches/release_90/test/CodeGen/X86/oddsubvector.ll Tue Aug 20 03:10:05 2019
@@ -190,3 +190,239 @@ define <16 x i32> @PR42819(<8 x i32>* %a
%3 = shufflevector <16 x i32> zeroinitializer, <16 x i32> %2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
ret <16 x i32> %3
}
+
+@b = dso_local local_unnamed_addr global i32 0, align 4
+@c = dso_local local_unnamed_addr global [49 x i32] zeroinitializer, align 16
+@d = dso_local local_unnamed_addr global [49 x i32] zeroinitializer, align 16
+
+define void @PR42833() {
+; SSE2-LABEL: PR42833:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movdqa c+{{.*}}(%rip), %xmm1
+; SSE2-NEXT: movdqa c+{{.*}}(%rip), %xmm0
+; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: addl {{.*}}(%rip), %eax
+; SSE2-NEXT: movd %eax, %xmm2
+; SSE2-NEXT: movaps {{.*#+}} xmm3 = <u,1,1,1>
+; SSE2-NEXT: movss {{.*#+}} xmm3 = xmm2[0],xmm3[1,2,3]
+; SSE2-NEXT: movdqa %xmm0, %xmm4
+; SSE2-NEXT: paddd %xmm3, %xmm4
+; SSE2-NEXT: pslld $23, %xmm3
+; SSE2-NEXT: paddd {{.*}}(%rip), %xmm3
+; SSE2-NEXT: cvttps2dq %xmm3, %xmm3
+; SSE2-NEXT: movdqa %xmm0, %xmm5
+; SSE2-NEXT: pmuludq %xmm3, %xmm5
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm3, %xmm6
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm6[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
+; SSE2-NEXT: movss {{.*#+}} xmm5 = xmm4[0],xmm5[1,2,3]
+; SSE2-NEXT: movdqa d+{{.*}}(%rip), %xmm3
+; SSE2-NEXT: psubd %xmm1, %xmm3
+; SSE2-NEXT: paddd %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm1, c+{{.*}}(%rip)
+; SSE2-NEXT: movaps %xmm5, c+{{.*}}(%rip)
+; SSE2-NEXT: movdqa c+{{.*}}(%rip), %xmm1
+; SSE2-NEXT: movdqa c+{{.*}}(%rip), %xmm4
+; SSE2-NEXT: movdqa d+{{.*}}(%rip), %xmm5
+; SSE2-NEXT: movdqa d+{{.*}}(%rip), %xmm6
+; SSE2-NEXT: movdqa d+{{.*}}(%rip), %xmm7
+; SSE2-NEXT: movss {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3]
+; SSE2-NEXT: psubd %xmm0, %xmm7
+; SSE2-NEXT: psubd %xmm4, %xmm6
+; SSE2-NEXT: psubd %xmm1, %xmm5
+; SSE2-NEXT: movdqa %xmm5, d+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm6, d+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm3, d+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm7, d+{{.*}}(%rip)
+; SSE2-NEXT: paddd %xmm4, %xmm4
+; SSE2-NEXT: paddd %xmm1, %xmm1
+; SSE2-NEXT: movdqa %xmm1, c+{{.*}}(%rip)
+; SSE2-NEXT: movdqa %xmm4, c+{{.*}}(%rip)
+; SSE2-NEXT: retq
+;
+; SSE42-LABEL: PR42833:
+; SSE42: # %bb.0:
+; SSE42-NEXT: movdqa c+{{.*}}(%rip), %xmm1
+; SSE42-NEXT: movdqa c+{{.*}}(%rip), %xmm0
+; SSE42-NEXT: movd %xmm0, %eax
+; SSE42-NEXT: addl {{.*}}(%rip), %eax
+; SSE42-NEXT: movdqa {{.*#+}} xmm2 = <u,1,1,1>
+; SSE42-NEXT: pinsrd $0, %eax, %xmm2
+; SSE42-NEXT: movdqa %xmm0, %xmm3
+; SSE42-NEXT: paddd %xmm2, %xmm3
+; SSE42-NEXT: pslld $23, %xmm2
+; SSE42-NEXT: paddd {{.*}}(%rip), %xmm2
+; SSE42-NEXT: cvttps2dq %xmm2, %xmm2
+; SSE42-NEXT: pmulld %xmm0, %xmm2
+; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm3[0,1],xmm2[2,3,4,5,6,7]
+; SSE42-NEXT: movdqa d+{{.*}}(%rip), %xmm3
+; SSE42-NEXT: psubd %xmm1, %xmm3
+; SSE42-NEXT: paddd %xmm1, %xmm1
+; SSE42-NEXT: movdqa %xmm1, c+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm2, c+{{.*}}(%rip)
+; SSE42-NEXT: movdqa c+{{.*}}(%rip), %xmm1
+; SSE42-NEXT: movdqa c+{{.*}}(%rip), %xmm2
+; SSE42-NEXT: movdqa d+{{.*}}(%rip), %xmm4
+; SSE42-NEXT: movdqa d+{{.*}}(%rip), %xmm5
+; SSE42-NEXT: movdqa d+{{.*}}(%rip), %xmm6
+; SSE42-NEXT: pinsrd $0, %eax, %xmm0
+; SSE42-NEXT: psubd %xmm0, %xmm6
+; SSE42-NEXT: psubd %xmm2, %xmm5
+; SSE42-NEXT: psubd %xmm1, %xmm4
+; SSE42-NEXT: movdqa %xmm4, d+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm5, d+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm3, d+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm6, d+{{.*}}(%rip)
+; SSE42-NEXT: paddd %xmm2, %xmm2
+; SSE42-NEXT: paddd %xmm1, %xmm1
+; SSE42-NEXT: movdqa %xmm1, c+{{.*}}(%rip)
+; SSE42-NEXT: movdqa %xmm2, c+{{.*}}(%rip)
+; SSE42-NEXT: retq
+;
+; AVX1-LABEL: PR42833:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa c+{{.*}}(%rip), %xmm0
+; AVX1-NEXT: vmovd %xmm0, %eax
+; AVX1-NEXT: addl {{.*}}(%rip), %eax
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = <u,1,1,1>
+; AVX1-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm2
+; AVX1-NEXT: vmovdqa c+{{.*}}(%rip), %xmm3
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm1, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm1
+; AVX1-NEXT: vpslld $1, %xmm3, %xmm3
+; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7]
+; AVX1-NEXT: vmovdqa d+{{.*}}(%rip), %xmm2
+; AVX1-NEXT: vpsubd c+{{.*}}(%rip), %xmm2, %xmm2
+; AVX1-NEXT: vmovups %ymm1, c+{{.*}}(%rip)
+; AVX1-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0
+; AVX1-NEXT: vmovdqa d+{{.*}}(%rip), %xmm1
+; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm0
+; AVX1-NEXT: vmovdqa d+{{.*}}(%rip), %xmm1
+; AVX1-NEXT: vmovdqa c+{{.*}}(%rip), %xmm3
+; AVX1-NEXT: vpsubd %xmm3, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa d+{{.*}}(%rip), %xmm4
+; AVX1-NEXT: vmovdqa c+{{.*}}(%rip), %xmm5
+; AVX1-NEXT: vpsubd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa %xmm2, d+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm4, d+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm1, d+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm0, d+{{.*}}(%rip)
+; AVX1-NEXT: vpaddd %xmm3, %xmm3, %xmm0
+; AVX1-NEXT: vpaddd %xmm5, %xmm5, %xmm1
+; AVX1-NEXT: vmovdqa %xmm1, c+{{.*}}(%rip)
+; AVX1-NEXT: vmovdqa %xmm0, c+{{.*}}(%rip)
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: PR42833:
+; AVX2: # %bb.0:
+; AVX2-NEXT: movl {{.*}}(%rip), %eax
+; AVX2-NEXT: vmovdqu c+{{.*}}(%rip), %ymm0
+; AVX2-NEXT: addl c+{{.*}}(%rip), %eax
+; AVX2-NEXT: vmovd %eax, %xmm1
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0],mem[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpaddd %ymm2, %ymm0, %ymm3
+; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
+; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3,4,5,6,7]
+; AVX2-NEXT: vmovdqu %ymm2, c+{{.*}}(%rip)
+; AVX2-NEXT: vmovdqu c+{{.*}}(%rip), %ymm2
+; AVX2-NEXT: vmovdqu d+{{.*}}(%rip), %ymm3
+; AVX2-NEXT: vmovdqu d+{{.*}}(%rip), %ymm4
+; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3,4,5,6,7]
+; AVX2-NEXT: vpsubd %ymm0, %ymm4, %ymm0
+; AVX2-NEXT: vpsubd %ymm2, %ymm3, %ymm1
+; AVX2-NEXT: vmovdqu %ymm1, d+{{.*}}(%rip)
+; AVX2-NEXT: vmovdqu %ymm0, d+{{.*}}(%rip)
+; AVX2-NEXT: vpaddd %ymm2, %ymm2, %ymm0
+; AVX2-NEXT: vmovdqu %ymm0, c+{{.*}}(%rip)
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: PR42833:
+; AVX512: # %bb.0:
+; AVX512-NEXT: movl {{.*}}(%rip), %eax
+; AVX512-NEXT: vmovdqu c+{{.*}}(%rip), %ymm0
+; AVX512-NEXT: vmovdqu64 c+{{.*}}(%rip), %zmm1
+; AVX512-NEXT: addl c+{{.*}}(%rip), %eax
+; AVX512-NEXT: vmovd %eax, %xmm2
+; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],mem[1,2,3,4,5,6,7]
+; AVX512-NEXT: vpaddd %ymm2, %ymm0, %ymm3
+; AVX512-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
+; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1,2,3,4,5,6,7]
+; AVX512-NEXT: vmovdqa c+{{.*}}(%rip), %xmm2
+; AVX512-NEXT: vmovdqu %ymm0, c+{{.*}}(%rip)
+; AVX512-NEXT: vmovdqu c+{{.*}}(%rip), %ymm0
+; AVX512-NEXT: vmovdqu64 d+{{.*}}(%rip), %zmm3
+; AVX512-NEXT: vpinsrd $0, %eax, %xmm2, %xmm2
+; AVX512-NEXT: vinserti32x4 $0, %xmm2, %zmm1, %zmm1
+; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm1
+; AVX512-NEXT: vpsubd %zmm1, %zmm3, %zmm1
+; AVX512-NEXT: vmovdqu64 %zmm1, d+{{.*}}(%rip)
+; AVX512-NEXT: vpaddd %ymm0, %ymm0, %ymm0
+; AVX512-NEXT: vmovdqu %ymm0, c+{{.*}}(%rip)
+; AVX512-NEXT: vzeroupper
+; AVX512-NEXT: retq
+;
+; XOP-LABEL: PR42833:
+; XOP: # %bb.0:
+; XOP-NEXT: vmovdqa c+{{.*}}(%rip), %xmm0
+; XOP-NEXT: vmovd %xmm0, %eax
+; XOP-NEXT: addl {{.*}}(%rip), %eax
+; XOP-NEXT: vmovdqa {{.*#+}} xmm1 = <u,1,1,1>
+; XOP-NEXT: vpinsrd $0, %eax, %xmm1, %xmm1
+; XOP-NEXT: vpaddd %xmm1, %xmm0, %xmm2
+; XOP-NEXT: vmovdqa c+{{.*}}(%rip), %xmm3
+; XOP-NEXT: vpshld %xmm1, %xmm0, %xmm1
+; XOP-NEXT: vpslld $1, %xmm3, %xmm3
+; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
+; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3,4,5,6,7]
+; XOP-NEXT: vmovdqa d+{{.*}}(%rip), %xmm2
+; XOP-NEXT: vpsubd c+{{.*}}(%rip), %xmm2, %xmm2
+; XOP-NEXT: vmovups %ymm1, c+{{.*}}(%rip)
+; XOP-NEXT: vpinsrd $0, %eax, %xmm0, %xmm0
+; XOP-NEXT: vmovdqa d+{{.*}}(%rip), %xmm1
+; XOP-NEXT: vpsubd %xmm0, %xmm1, %xmm0
+; XOP-NEXT: vmovdqa d+{{.*}}(%rip), %xmm1
+; XOP-NEXT: vmovdqa c+{{.*}}(%rip), %xmm3
+; XOP-NEXT: vpsubd %xmm3, %xmm1, %xmm1
+; XOP-NEXT: vmovdqa d+{{.*}}(%rip), %xmm4
+; XOP-NEXT: vmovdqa c+{{.*}}(%rip), %xmm5
+; XOP-NEXT: vpsubd %xmm5, %xmm4, %xmm4
+; XOP-NEXT: vmovdqa %xmm2, d+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm4, d+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm1, d+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm0, d+{{.*}}(%rip)
+; XOP-NEXT: vpaddd %xmm3, %xmm3, %xmm0
+; XOP-NEXT: vpaddd %xmm5, %xmm5, %xmm1
+; XOP-NEXT: vmovdqa %xmm1, c+{{.*}}(%rip)
+; XOP-NEXT: vmovdqa %xmm0, c+{{.*}}(%rip)
+; XOP-NEXT: vzeroupper
+; XOP-NEXT: retq
+ %1 = load i32, i32* @b, align 4
+ %2 = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([49 x i32], [49 x i32]* @c, i64 0, i64 32) to <8 x i32>*), align 16
+ %3 = shufflevector <8 x i32> %2, <8 x i32> undef, <16 x i32> <i32 undef, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %4 = extractelement <8 x i32> %2, i32 0
+ %5 = add i32 %1, %4
+ %6 = insertelement <8 x i32> <i32 undef, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>, i32 %5, i32 0
+ %7 = add <8 x i32> %2, %6
+ %8 = shl <8 x i32> %2, %6
+ %9 = shufflevector <8 x i32> %7, <8 x i32> %8, <8 x i32> <i32 0, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ store <8 x i32> %9, <8 x i32>* bitcast (i32* getelementptr inbounds ([49 x i32], [49 x i32]* @c, i64 0, i64 32) to <8 x i32>*), align 16
+ %10 = load <8 x i32>, <8 x i32>* bitcast (i32* getelementptr inbounds ([49 x i32], [49 x i32]* @c, i64 0, i64 40) to <8 x i32>*), align 16
+ %11 = shufflevector <8 x i32> %10, <8 x i32> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+ %12 = load <16 x i32>, <16 x i32>* bitcast (i32* getelementptr inbounds ([49 x i32], [49 x i32]* @d, i64 0, i64 32) to <16 x i32>*), align 16
+ %13 = insertelement <16 x i32> %3, i32 %5, i32 0
+ %14 = shufflevector <16 x i32> %13, <16 x i32> %11, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+ %15 = sub <16 x i32> %12, %14
+ store <16 x i32> %15, <16 x i32>* bitcast (i32* getelementptr inbounds ([49 x i32], [49 x i32]* @d, i64 0, i64 32) to <16 x i32>*), align 16
+ %16 = shl <8 x i32> %10, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ store <8 x i32> %16, <8 x i32>* bitcast (i32* getelementptr inbounds ([49 x i32], [49 x i32]* @c, i64 0, i64 40) to <8 x i32>*), align 16
+ ret void
+}