[llvm] r301334 - [X86][SSE] Add tests for PR14657 showing current codegen.
Simon Pilgrim via llvm-commits
llvm-commits@lists.llvm.org
Tue Apr 25 10:22:35 PDT 2017
Author: rksimon
Date: Tue Apr 25 12:22:34 2017
New Revision: 301334
URL: http://llvm.org/viewvc/llvm-project?rev=301334&view=rev
Log:
[X86][SSE] Add tests for PR14657 showing current codegen.
Added:
llvm/trunk/test/CodeGen/X86/pr14657.ll
Added: llvm/trunk/test/CodeGen/X86/pr14657.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/pr14657.ll?rev=301334&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/pr14657.ll (added)
+++ llvm/trunk/test/CodeGen/X86/pr14657.ll Tue Apr 25 12:22:34 2017
@@ -0,0 +1,325 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
+
+; PR14657 - avoid truncation/extension of comparison results
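+; In most of the codegen below, the <8 x i1> compare masks are truncated to
+; 16-bit lanes (shuffle sequences plus psllw/psraw on SSE, vpacksswb on AVX)
+; and then re-extended to 32 bits before the store. The AVX example25 loop
+; shows the desired form: the masks stay at their natural 32-bit width.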
+
+@da = common global [1024 x float] zeroinitializer, align 32
+@db = common global [1024 x float] zeroinitializer, align 32
+@dc = common global [1024 x float] zeroinitializer, align 32
+@dd = common global [1024 x float] zeroinitializer, align 32
+@dj = common global [1024 x i32] zeroinitializer, align 32
+
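+; A hedged C-level sketch of what @_Z9example25v below computes, reconstructed
+; from the IR (the C spelling itself is an assumption; the array and function
+; names come from the globals and the mangled symbol):
+;
+;   void example25(void) {
+;     for (int i = 0; i < 1024; i++)
+;       dj[i] = (da[i] < db[i]) & (dc[i] < dd[i]);
+;   }
+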
+define void @_Z9example25v() nounwind uwtable noinline ssp {
+; SSE2-LABEL: _Z9example25v:
+; SSE2: # BB#0: # %vector.ph
+; SSE2-NEXT: movq $-4096, %rax # imm = 0xF000
+; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
+; SSE2-NEXT: .p2align 4, 0x90
+; SSE2-NEXT: .LBB0_1: # %vector.body
+; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
+; SSE2-NEXT: movaps da+4096(%rax), %xmm1
+; SSE2-NEXT: movaps da+4112(%rax), %xmm2
+; SSE2-NEXT: cmpltps db+4112(%rax), %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: cmpltps db+4096(%rax), %xmm1
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
+; SSE2-NEXT: psllw $15, %xmm1
+; SSE2-NEXT: psraw $15, %xmm1
+; SSE2-NEXT: movaps dc+4096(%rax), %xmm2
+; SSE2-NEXT: movaps dc+4112(%rax), %xmm3
+; SSE2-NEXT: cmpltps dd+4112(%rax), %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE2-NEXT: cmpltps dd+4096(%rax), %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: psllw $15, %xmm2
+; SSE2-NEXT: psraw $15, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: movdqa %xmm2, %xmm1
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE2-NEXT: pand %xmm0, %xmm1
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: movdqa %xmm2, dj+4112(%rax)
+; SSE2-NEXT: movdqa %xmm1, dj+4096(%rax)
+; SSE2-NEXT: addq $32, %rax
+; SSE2-NEXT: jne .LBB0_1
+; SSE2-NEXT: # BB#2: # %for.end
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: _Z9example25v:
+; SSE41: # BB#0: # %vector.ph
+; SSE41-NEXT: movq $-4096, %rax # imm = 0xF000
+; SSE41-NEXT: movdqa {{.*#+}} xmm0 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,1,1,1]
+; SSE41-NEXT: .p2align 4, 0x90
+; SSE41-NEXT: .LBB0_1: # %vector.body
+; SSE41-NEXT: # =>This Inner Loop Header: Depth=1
+; SSE41-NEXT: movaps da+4096(%rax), %xmm2
+; SSE41-NEXT: movaps da+4112(%rax), %xmm3
+; SSE41-NEXT: cmpltps db+4112(%rax), %xmm3
+; SSE41-NEXT: pshufb %xmm0, %xmm3
+; SSE41-NEXT: cmpltps db+4096(%rax), %xmm2
+; SSE41-NEXT: pshufb %xmm0, %xmm2
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE41-NEXT: psllw $15, %xmm2
+; SSE41-NEXT: psraw $15, %xmm2
+; SSE41-NEXT: movaps dc+4096(%rax), %xmm3
+; SSE41-NEXT: movaps dc+4112(%rax), %xmm4
+; SSE41-NEXT: cmpltps dd+4112(%rax), %xmm4
+; SSE41-NEXT: pshufb %xmm0, %xmm4
+; SSE41-NEXT: cmpltps dd+4096(%rax), %xmm3
+; SSE41-NEXT: pshufb %xmm0, %xmm3
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSE41-NEXT: psllw $15, %xmm3
+; SSE41-NEXT: psraw $15, %xmm3
+; SSE41-NEXT: pand %xmm2, %xmm3
+; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
+; SSE41-NEXT: pand %xmm1, %xmm2
+; SSE41-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
+; SSE41-NEXT: pand %xmm1, %xmm3
+; SSE41-NEXT: movdqa %xmm3, dj+4112(%rax)
+; SSE41-NEXT: movdqa %xmm2, dj+4096(%rax)
+; SSE41-NEXT: addq $32, %rax
+; SSE41-NEXT: jne .LBB0_1
+; SSE41-NEXT: # BB#2: # %for.end
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: _Z9example25v:
+; AVX1: # BB#0: # %vector.ph
+; AVX1-NEXT: movq $-4096, %rax # imm = 0xF000
+; AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [1,1,1,1,1,1,1,1]
+; AVX1-NEXT: .p2align 4, 0x90
+; AVX1-NEXT: .LBB0_1: # %vector.body
+; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX1-NEXT: vmovups da+4096(%rax), %ymm1
+; AVX1-NEXT: vcmpltps db+4096(%rax), %ymm1, %ymm1
+; AVX1-NEXT: vmovups dc+4096(%rax), %ymm2
+; AVX1-NEXT: vcmpltps dd+4096(%rax), %ymm2, %ymm2
+; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX1-NEXT: vandps %ymm0, %ymm1, %ymm1
+; AVX1-NEXT: vmovups %ymm1, dj+4096(%rax)
+; AVX1-NEXT: addq $32, %rax
+; AVX1-NEXT: jne .LBB0_1
+; AVX1-NEXT: # BB#2: # %for.end
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _Z9example25v:
+; AVX2: # BB#0: # %vector.ph
+; AVX2-NEXT: movq $-4096, %rax # imm = 0xF000
+; AVX2-NEXT: vbroadcastss {{.*}}(%rip), %ymm0
+; AVX2-NEXT: .p2align 4, 0x90
+; AVX2-NEXT: .LBB0_1: # %vector.body
+; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX2-NEXT: vmovups da+4096(%rax), %ymm1
+; AVX2-NEXT: vcmpltps db+4096(%rax), %ymm1, %ymm1
+; AVX2-NEXT: vmovups dc+4096(%rax), %ymm2
+; AVX2-NEXT: vcmpltps dd+4096(%rax), %ymm2, %ymm2
+; AVX2-NEXT: vandps %ymm2, %ymm1, %ymm1
+; AVX2-NEXT: vandps %ymm0, %ymm1, %ymm1
+; AVX2-NEXT: vmovups %ymm1, dj+4096(%rax)
+; AVX2-NEXT: addq $32, %rax
+; AVX2-NEXT: jne .LBB0_1
+; AVX2-NEXT: # BB#2: # %for.end
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+vector.ph:
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds [1024 x float], [1024 x float]* @da, i64 0, i64 %index
+ %1 = bitcast float* %0 to <8 x float>*
+ %2 = load <8 x float>, <8 x float>* %1, align 16
+ %3 = getelementptr inbounds [1024 x float], [1024 x float]* @db, i64 0, i64 %index
+ %4 = bitcast float* %3 to <8 x float>*
+ %5 = load <8 x float>, <8 x float>* %4, align 16
+ %6 = fcmp olt <8 x float> %2, %5
+ %7 = getelementptr inbounds [1024 x float], [1024 x float]* @dc, i64 0, i64 %index
+ %8 = bitcast float* %7 to <8 x float>*
+ %9 = load <8 x float>, <8 x float>* %8, align 16
+ %10 = getelementptr inbounds [1024 x float], [1024 x float]* @dd, i64 0, i64 %index
+ %11 = bitcast float* %10 to <8 x float>*
+ %12 = load <8 x float>, <8 x float>* %11, align 16
+ %13 = fcmp olt <8 x float> %9, %12
+ %14 = and <8 x i1> %6, %13
+ %15 = zext <8 x i1> %14 to <8 x i32>
+ %16 = getelementptr inbounds [1024 x i32], [1024 x i32]* @dj, i64 0, i64 %index
+ %17 = bitcast i32* %16 to <8 x i32>*
+ store <8 x i32> %15, <8 x i32>* %17, align 16
+ %index.next = add i64 %index, 8
+ %18 = icmp eq i64 %index.next, 1024
+ br i1 %18, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+}
+
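+; Similarly, a hedged C-level sketch of @_Z9example24ss, reconstructed from
+; the IR (again, the C spelling is an assumption; the select of the two
+; broadcast i16 values maps to the ternary):
+;
+;   void example24(short x, short y) {
+;     for (int i = 0; i < 1024; i++)
+;       dj[i] = da[i] < db[i] ? x : y;
+;   }
+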
+define void @_Z9example24ss(i16 signext %x, i16 signext %y) nounwind uwtable noinline ssp {
+; SSE2-LABEL: _Z9example24ss:
+; SSE2: # BB#0: # %vector.ph
+; SSE2-NEXT: movd %edi, %xmm0
+; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE2-NEXT: movd %esi, %xmm1
+; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE2-NEXT: movq $-4096, %rax # imm = 0xF000
+; SSE2-NEXT: .p2align 4, 0x90
+; SSE2-NEXT: .LBB1_1: # %vector.body
+; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
+; SSE2-NEXT: movaps da+4096(%rax), %xmm2
+; SSE2-NEXT: movaps da+4112(%rax), %xmm3
+; SSE2-NEXT: cmpltps db+4112(%rax), %xmm3
+; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
+; SSE2-NEXT: cmpltps db+4096(%rax), %xmm2
+; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
+; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE2-NEXT: movdqa %xmm0, %xmm3
+; SSE2-NEXT: pand %xmm2, %xmm3
+; SSE2-NEXT: pandn %xmm1, %xmm2
+; SSE2-NEXT: por %xmm3, %xmm2
+; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
+; SSE2-NEXT: psrad $16, %xmm3
+; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
+; SSE2-NEXT: psrad $16, %xmm2
+; SSE2-NEXT: movdqa %xmm2, dj+4112(%rax)
+; SSE2-NEXT: movdqa %xmm3, dj+4096(%rax)
+; SSE2-NEXT: addq $32, %rax
+; SSE2-NEXT: jne .LBB1_1
+; SSE2-NEXT: # BB#2: # %for.end
+; SSE2-NEXT: retq
+;
+; SSE41-LABEL: _Z9example24ss:
+; SSE41: # BB#0: # %vector.ph
+; SSE41-NEXT: movd %edi, %xmm0
+; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE41-NEXT: movd %esi, %xmm1
+; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE41-NEXT: movq $-4096, %rax # imm = 0xF000
+; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
+; SSE41-NEXT: .p2align 4, 0x90
+; SSE41-NEXT: .LBB1_1: # %vector.body
+; SSE41-NEXT: # =>This Inner Loop Header: Depth=1
+; SSE41-NEXT: movaps da+4096(%rax), %xmm3
+; SSE41-NEXT: movaps da+4112(%rax), %xmm4
+; SSE41-NEXT: cmpltps db+4112(%rax), %xmm4
+; SSE41-NEXT: pshufb %xmm2, %xmm4
+; SSE41-NEXT: cmpltps db+4096(%rax), %xmm3
+; SSE41-NEXT: pshufb %xmm2, %xmm3
+; SSE41-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
+; SSE41-NEXT: movdqa %xmm0, %xmm4
+; SSE41-NEXT: pand %xmm3, %xmm4
+; SSE41-NEXT: pandn %xmm1, %xmm3
+; SSE41-NEXT: por %xmm4, %xmm3
+; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,3,0,1]
+; SSE41-NEXT: pmovsxwd %xmm4, %xmm4
+; SSE41-NEXT: pmovsxwd %xmm3, %xmm3
+; SSE41-NEXT: movdqa %xmm3, dj+4096(%rax)
+; SSE41-NEXT: movdqa %xmm4, dj+4112(%rax)
+; SSE41-NEXT: addq $32, %rax
+; SSE41-NEXT: jne .LBB1_1
+; SSE41-NEXT: # BB#2: # %for.end
+; SSE41-NEXT: retq
+;
+; AVX1-LABEL: _Z9example24ss:
+; AVX1: # BB#0: # %vector.ph
+; AVX1-NEXT: vmovd %edi, %xmm0
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; AVX1-NEXT: vmovd %esi, %xmm1
+; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
+; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; AVX1-NEXT: movq $-4096, %rax # imm = 0xF000
+; AVX1-NEXT: .p2align 4, 0x90
+; AVX1-NEXT: .LBB1_1: # %vector.body
+; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX1-NEXT: vmovups da+4096(%rax), %ymm2
+; AVX1-NEXT: vcmpltps db+4096(%rax), %ymm2, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpandn %xmm1, %xmm2, %xmm3
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm2
+; AVX1-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm3
+; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; AVX1-NEXT: vpmovsxwd %xmm2, %xmm2
+; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
+; AVX1-NEXT: vmovups %ymm2, dj+4096(%rax)
+; AVX1-NEXT: addq $32, %rax
+; AVX1-NEXT: jne .LBB1_1
+; AVX1-NEXT: # BB#2: # %for.end
+; AVX1-NEXT: vzeroupper
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: _Z9example24ss:
+; AVX2: # BB#0: # %vector.ph
+; AVX2-NEXT: vmovd %edi, %xmm0
+; AVX2-NEXT: vpbroadcastw %xmm0, %xmm0
+; AVX2-NEXT: vmovd %esi, %xmm1
+; AVX2-NEXT: vpbroadcastw %xmm1, %xmm1
+; AVX2-NEXT: movq $-4096, %rax # imm = 0xF000
+; AVX2-NEXT: .p2align 4, 0x90
+; AVX2-NEXT: .LBB1_1: # %vector.body
+; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
+; AVX2-NEXT: vmovups da+4096(%rax), %ymm2
+; AVX2-NEXT: vcmpltps db+4096(%rax), %ymm2, %ymm2
+; AVX2-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX2-NEXT: vpacksswb %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpandn %xmm1, %xmm2, %xmm3
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm2
+; AVX2-NEXT: vpor %xmm3, %xmm2, %xmm2
+; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
+; AVX2-NEXT: vmovdqu %ymm2, dj+4096(%rax)
+; AVX2-NEXT: addq $32, %rax
+; AVX2-NEXT: jne .LBB1_1
+; AVX2-NEXT: # BB#2: # %for.end
+; AVX2-NEXT: vzeroupper
+; AVX2-NEXT: retq
+vector.ph:
+ %0 = insertelement <8 x i16> undef, i16 %x, i32 0
+ %broadcast11 = shufflevector <8 x i16> %0, <8 x i16> undef, <8 x i32> zeroinitializer
+ %1 = insertelement <8 x i16> undef, i16 %y, i32 0
+ %broadcast12 = shufflevector <8 x i16> %1, <8 x i16> undef, <8 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %2 = getelementptr inbounds [1024 x float], [1024 x float]* @da, i64 0, i64 %index
+ %3 = bitcast float* %2 to <8 x float>*
+ %4 = load <8 x float>, <8 x float>* %3, align 16
+ %5 = getelementptr inbounds [1024 x float], [1024 x float]* @db, i64 0, i64 %index
+ %6 = bitcast float* %5 to <8 x float>*
+ %7 = load <8 x float>, <8 x float>* %6, align 16
+ %8 = fcmp olt <8 x float> %4, %7
+ %9 = select <8 x i1> %8, <8 x i16> %broadcast11, <8 x i16> %broadcast12
+ %10 = sext <8 x i16> %9 to <8 x i32>
+ %11 = getelementptr inbounds [1024 x i32], [1024 x i32]* @dj, i64 0, i64 %index
+ %12 = bitcast i32* %11 to <8 x i32>*
+ store <8 x i32> %10, <8 x i32>* %12, align 16
+ %index.next = add i64 %index, 8
+ %13 = icmp eq i64 %index.next, 1024
+ br i1 %13, label %for.end, label %vector.body
+
+for.end: ; preds = %vector.body
+ ret void
+}