[llvm] r292626 - [SLP] A new test for horizontal vectorization of a non-power-of-2 number of instructions

Alexey Bataev via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 20 10:04:29 PST 2017


Author: abataev
Date: Fri Jan 20 12:04:29 2017
New Revision: 292626

URL: http://llvm.org/viewvc/llvm-project?rev=292626&view=rev
Log:
[SLP] A new test for horizontal vectorization of a non-power-of-2
number of instructions.
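
For reference, the new test is the fully unrolled, fast-math sum of the 48
floats x[0]..x[47] (every fadd in it carries the "fast" flag). A minimal C
sketch of the same scalar pattern, assuming fast-math compilation; the loop
form and the const qualifier are illustrative, only the function name f and
the element count come from the test:

    /* Sum 48 floats: a reduction whose length (48) is not a power of 2,
     * so the SLP vectorizer has to split it rather than emit one full
     * power-of-2 reduction. */
    float f(const float *x) {
      float sum = x[0];
      for (int i = 1; i < 48; ++i)
        sum += x[i];        /* appears fully unrolled in the IR below */
      return sum;
    }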

Modified:
    llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal-list.ll

Modified: llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal-list.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal-list.ll?rev=292626&r1=292625&r2=292626&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal-list.ll (original)
+++ llvm/trunk/test/Transforms/SLPVectorizer/X86/horizontal-list.ll Fri Jan 20 12:04:29 2017
@@ -280,3 +280,293 @@ entry:
   ret float %max.0.mul3.2
 }
 
+define float @f(float* nocapture readonly %x) {
+; CHECK-LABEL: @f(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, float* [[X:%.*]], align 4
+; CHECK-NEXT:    [[ARRAYIDX_1:%.*]] = getelementptr inbounds float, float* [[X]], i64 1
+; CHECK-NEXT:    [[TMP1:%.*]] = load float, float* [[ARRAYIDX_1]], align 4
+; CHECK-NEXT:    [[ADD_1:%.*]] = fadd fast float [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, float* [[X]], i64 2
+; CHECK-NEXT:    [[TMP2:%.*]] = load float, float* [[ARRAYIDX_2]], align 4
+; CHECK-NEXT:    [[ADD_2:%.*]] = fadd fast float [[TMP2]], [[ADD_1]]
+; CHECK-NEXT:    [[ARRAYIDX_3:%.*]] = getelementptr inbounds float, float* [[X]], i64 3
+; CHECK-NEXT:    [[TMP3:%.*]] = load float, float* [[ARRAYIDX_3]], align 4
+; CHECK-NEXT:    [[ADD_3:%.*]] = fadd fast float [[TMP3]], [[ADD_2]]
+; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds float, float* [[X]], i64 4
+; CHECK-NEXT:    [[TMP4:%.*]] = load float, float* [[ARRAYIDX_4]], align 4
+; CHECK-NEXT:    [[ADD_4:%.*]] = fadd fast float [[TMP4]], [[ADD_3]]
+; CHECK-NEXT:    [[ARRAYIDX_5:%.*]] = getelementptr inbounds float, float* [[X]], i64 5
+; CHECK-NEXT:    [[TMP5:%.*]] = load float, float* [[ARRAYIDX_5]], align 4
+; CHECK-NEXT:    [[ADD_5:%.*]] = fadd fast float [[TMP5]], [[ADD_4]]
+; CHECK-NEXT:    [[ARRAYIDX_6:%.*]] = getelementptr inbounds float, float* [[X]], i64 6
+; CHECK-NEXT:    [[TMP6:%.*]] = load float, float* [[ARRAYIDX_6]], align 4
+; CHECK-NEXT:    [[ADD_6:%.*]] = fadd fast float [[TMP6]], [[ADD_5]]
+; CHECK-NEXT:    [[ARRAYIDX_7:%.*]] = getelementptr inbounds float, float* [[X]], i64 7
+; CHECK-NEXT:    [[TMP7:%.*]] = load float, float* [[ARRAYIDX_7]], align 4
+; CHECK-NEXT:    [[ADD_7:%.*]] = fadd fast float [[TMP7]], [[ADD_6]]
+; CHECK-NEXT:    [[ARRAYIDX_8:%.*]] = getelementptr inbounds float, float* [[X]], i64 8
+; CHECK-NEXT:    [[TMP8:%.*]] = load float, float* [[ARRAYIDX_8]], align 4
+; CHECK-NEXT:    [[ADD_8:%.*]] = fadd fast float [[TMP8]], [[ADD_7]]
+; CHECK-NEXT:    [[ARRAYIDX_9:%.*]] = getelementptr inbounds float, float* [[X]], i64 9
+; CHECK-NEXT:    [[TMP9:%.*]] = load float, float* [[ARRAYIDX_9]], align 4
+; CHECK-NEXT:    [[ADD_9:%.*]] = fadd fast float [[TMP9]], [[ADD_8]]
+; CHECK-NEXT:    [[ARRAYIDX_10:%.*]] = getelementptr inbounds float, float* [[X]], i64 10
+; CHECK-NEXT:    [[TMP10:%.*]] = load float, float* [[ARRAYIDX_10]], align 4
+; CHECK-NEXT:    [[ADD_10:%.*]] = fadd fast float [[TMP10]], [[ADD_9]]
+; CHECK-NEXT:    [[ARRAYIDX_11:%.*]] = getelementptr inbounds float, float* [[X]], i64 11
+; CHECK-NEXT:    [[TMP11:%.*]] = load float, float* [[ARRAYIDX_11]], align 4
+; CHECK-NEXT:    [[ADD_11:%.*]] = fadd fast float [[TMP11]], [[ADD_10]]
+; CHECK-NEXT:    [[ARRAYIDX_12:%.*]] = getelementptr inbounds float, float* [[X]], i64 12
+; CHECK-NEXT:    [[TMP12:%.*]] = load float, float* [[ARRAYIDX_12]], align 4
+; CHECK-NEXT:    [[ADD_12:%.*]] = fadd fast float [[TMP12]], [[ADD_11]]
+; CHECK-NEXT:    [[ARRAYIDX_13:%.*]] = getelementptr inbounds float, float* [[X]], i64 13
+; CHECK-NEXT:    [[TMP13:%.*]] = load float, float* [[ARRAYIDX_13]], align 4
+; CHECK-NEXT:    [[ADD_13:%.*]] = fadd fast float [[TMP13]], [[ADD_12]]
+; CHECK-NEXT:    [[ARRAYIDX_14:%.*]] = getelementptr inbounds float, float* [[X]], i64 14
+; CHECK-NEXT:    [[TMP14:%.*]] = load float, float* [[ARRAYIDX_14]], align 4
+; CHECK-NEXT:    [[ADD_14:%.*]] = fadd fast float [[TMP14]], [[ADD_13]]
+; CHECK-NEXT:    [[ARRAYIDX_15:%.*]] = getelementptr inbounds float, float* [[X]], i64 15
+; CHECK-NEXT:    [[TMP15:%.*]] = load float, float* [[ARRAYIDX_15]], align 4
+; CHECK-NEXT:    [[ADD_15:%.*]] = fadd fast float [[TMP15]], [[ADD_14]]
+; CHECK-NEXT:    [[ARRAYIDX_16:%.*]] = getelementptr inbounds float, float* [[X]], i64 16
+; CHECK-NEXT:    [[ARRAYIDX_17:%.*]] = getelementptr inbounds float, float* [[X]], i64 17
+; CHECK-NEXT:    [[ARRAYIDX_18:%.*]] = getelementptr inbounds float, float* [[X]], i64 18
+; CHECK-NEXT:    [[ARRAYIDX_19:%.*]] = getelementptr inbounds float, float* [[X]], i64 19
+; CHECK-NEXT:    [[ARRAYIDX_20:%.*]] = getelementptr inbounds float, float* [[X]], i64 20
+; CHECK-NEXT:    [[ARRAYIDX_21:%.*]] = getelementptr inbounds float, float* [[X]], i64 21
+; CHECK-NEXT:    [[ARRAYIDX_22:%.*]] = getelementptr inbounds float, float* [[X]], i64 22
+; CHECK-NEXT:    [[ARRAYIDX_23:%.*]] = getelementptr inbounds float, float* [[X]], i64 23
+; CHECK-NEXT:    [[ARRAYIDX_24:%.*]] = getelementptr inbounds float, float* [[X]], i64 24
+; CHECK-NEXT:    [[ARRAYIDX_25:%.*]] = getelementptr inbounds float, float* [[X]], i64 25
+; CHECK-NEXT:    [[ARRAYIDX_26:%.*]] = getelementptr inbounds float, float* [[X]], i64 26
+; CHECK-NEXT:    [[ARRAYIDX_27:%.*]] = getelementptr inbounds float, float* [[X]], i64 27
+; CHECK-NEXT:    [[ARRAYIDX_28:%.*]] = getelementptr inbounds float, float* [[X]], i64 28
+; CHECK-NEXT:    [[ARRAYIDX_29:%.*]] = getelementptr inbounds float, float* [[X]], i64 29
+; CHECK-NEXT:    [[ARRAYIDX_30:%.*]] = getelementptr inbounds float, float* [[X]], i64 30
+; CHECK-NEXT:    [[ARRAYIDX_31:%.*]] = getelementptr inbounds float, float* [[X]], i64 31
+; CHECK-NEXT:    [[ARRAYIDX_32:%.*]] = getelementptr inbounds float, float* [[X]], i64 32
+; CHECK-NEXT:    [[ARRAYIDX_33:%.*]] = getelementptr inbounds float, float* [[X]], i64 33
+; CHECK-NEXT:    [[ARRAYIDX_34:%.*]] = getelementptr inbounds float, float* [[X]], i64 34
+; CHECK-NEXT:    [[ARRAYIDX_35:%.*]] = getelementptr inbounds float, float* [[X]], i64 35
+; CHECK-NEXT:    [[ARRAYIDX_36:%.*]] = getelementptr inbounds float, float* [[X]], i64 36
+; CHECK-NEXT:    [[ARRAYIDX_37:%.*]] = getelementptr inbounds float, float* [[X]], i64 37
+; CHECK-NEXT:    [[ARRAYIDX_38:%.*]] = getelementptr inbounds float, float* [[X]], i64 38
+; CHECK-NEXT:    [[ARRAYIDX_39:%.*]] = getelementptr inbounds float, float* [[X]], i64 39
+; CHECK-NEXT:    [[ARRAYIDX_40:%.*]] = getelementptr inbounds float, float* [[X]], i64 40
+; CHECK-NEXT:    [[ARRAYIDX_41:%.*]] = getelementptr inbounds float, float* [[X]], i64 41
+; CHECK-NEXT:    [[ARRAYIDX_42:%.*]] = getelementptr inbounds float, float* [[X]], i64 42
+; CHECK-NEXT:    [[ARRAYIDX_43:%.*]] = getelementptr inbounds float, float* [[X]], i64 43
+; CHECK-NEXT:    [[ARRAYIDX_44:%.*]] = getelementptr inbounds float, float* [[X]], i64 44
+; CHECK-NEXT:    [[ARRAYIDX_45:%.*]] = getelementptr inbounds float, float* [[X]], i64 45
+; CHECK-NEXT:    [[ARRAYIDX_46:%.*]] = getelementptr inbounds float, float* [[X]], i64 46
+; CHECK-NEXT:    [[ARRAYIDX_47:%.*]] = getelementptr inbounds float, float* [[X]], i64 47
+; CHECK-NEXT:    [[TMP16:%.*]] = bitcast float* [[ARRAYIDX_16]] to <32 x float>*
+; CHECK-NEXT:    [[TMP17:%.*]] = load <32 x float>, <32 x float>* [[TMP16]], align 4
+; CHECK-NEXT:    [[ADD_16:%.*]] = fadd fast float undef, [[ADD_15]]
+; CHECK-NEXT:    [[ADD_17:%.*]] = fadd fast float undef, [[ADD_16]]
+; CHECK-NEXT:    [[ADD_18:%.*]] = fadd fast float undef, [[ADD_17]]
+; CHECK-NEXT:    [[ADD_19:%.*]] = fadd fast float undef, [[ADD_18]]
+; CHECK-NEXT:    [[ADD_20:%.*]] = fadd fast float undef, [[ADD_19]]
+; CHECK-NEXT:    [[ADD_21:%.*]] = fadd fast float undef, [[ADD_20]]
+; CHECK-NEXT:    [[ADD_22:%.*]] = fadd fast float undef, [[ADD_21]]
+; CHECK-NEXT:    [[ADD_23:%.*]] = fadd fast float undef, [[ADD_22]]
+; CHECK-NEXT:    [[ADD_24:%.*]] = fadd fast float undef, [[ADD_23]]
+; CHECK-NEXT:    [[ADD_25:%.*]] = fadd fast float undef, [[ADD_24]]
+; CHECK-NEXT:    [[ADD_26:%.*]] = fadd fast float undef, [[ADD_25]]
+; CHECK-NEXT:    [[ADD_27:%.*]] = fadd fast float undef, [[ADD_26]]
+; CHECK-NEXT:    [[ADD_28:%.*]] = fadd fast float undef, [[ADD_27]]
+; CHECK-NEXT:    [[ADD_29:%.*]] = fadd fast float undef, [[ADD_28]]
+; CHECK-NEXT:    [[ADD_30:%.*]] = fadd fast float undef, [[ADD_29]]
+; CHECK-NEXT:    [[ADD_31:%.*]] = fadd fast float undef, [[ADD_30]]
+; CHECK-NEXT:    [[ADD_32:%.*]] = fadd fast float undef, [[ADD_31]]
+; CHECK-NEXT:    [[ADD_33:%.*]] = fadd fast float undef, [[ADD_32]]
+; CHECK-NEXT:    [[ADD_34:%.*]] = fadd fast float undef, [[ADD_33]]
+; CHECK-NEXT:    [[ADD_35:%.*]] = fadd fast float undef, [[ADD_34]]
+; CHECK-NEXT:    [[ADD_36:%.*]] = fadd fast float undef, [[ADD_35]]
+; CHECK-NEXT:    [[ADD_37:%.*]] = fadd fast float undef, [[ADD_36]]
+; CHECK-NEXT:    [[ADD_38:%.*]] = fadd fast float undef, [[ADD_37]]
+; CHECK-NEXT:    [[ADD_39:%.*]] = fadd fast float undef, [[ADD_38]]
+; CHECK-NEXT:    [[ADD_40:%.*]] = fadd fast float undef, [[ADD_39]]
+; CHECK-NEXT:    [[ADD_41:%.*]] = fadd fast float undef, [[ADD_40]]
+; CHECK-NEXT:    [[ADD_42:%.*]] = fadd fast float undef, [[ADD_41]]
+; CHECK-NEXT:    [[ADD_43:%.*]] = fadd fast float undef, [[ADD_42]]
+; CHECK-NEXT:    [[ADD_44:%.*]] = fadd fast float undef, [[ADD_43]]
+; CHECK-NEXT:    [[ADD_45:%.*]] = fadd fast float undef, [[ADD_44]]
+; CHECK-NEXT:    [[ADD_46:%.*]] = fadd fast float undef, [[ADD_45]]
+; CHECK-NEXT:    [[RDX_SHUF:%.*]] = shufflevector <32 x float> [[TMP17]], <32 x float> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX:%.*]] = fadd fast <32 x float> [[TMP17]], [[RDX_SHUF]]
+; CHECK-NEXT:    [[RDX_SHUF1:%.*]] = shufflevector <32 x float> [[BIN_RDX]], <32 x float> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX2:%.*]] = fadd fast <32 x float> [[BIN_RDX]], [[RDX_SHUF1]]
+; CHECK-NEXT:    [[RDX_SHUF3:%.*]] = shufflevector <32 x float> [[BIN_RDX2]], <32 x float> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX4:%.*]] = fadd fast <32 x float> [[BIN_RDX2]], [[RDX_SHUF3]]
+; CHECK-NEXT:    [[RDX_SHUF5:%.*]] = shufflevector <32 x float> [[BIN_RDX4]], <32 x float> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX6:%.*]] = fadd fast <32 x float> [[BIN_RDX4]], [[RDX_SHUF5]]
+; CHECK-NEXT:    [[RDX_SHUF7:%.*]] = shufflevector <32 x float> [[BIN_RDX6]], <32 x float> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+; CHECK-NEXT:    [[BIN_RDX8:%.*]] = fadd fast <32 x float> [[BIN_RDX6]], [[RDX_SHUF7]]
+; CHECK-NEXT:    [[TMP18:%.*]] = extractelement <32 x float> [[BIN_RDX8]], i32 0
+; CHECK-NEXT:    [[TMP19:%.*]] = fadd fast float [[TMP18]], [[TMP15]]
+; CHECK-NEXT:    [[TMP20:%.*]] = fadd fast float [[TMP19]], [[TMP14]]
+; CHECK-NEXT:    [[TMP21:%.*]] = fadd fast float [[TMP20]], [[TMP13]]
+; CHECK-NEXT:    [[TMP22:%.*]] = fadd fast float [[TMP21]], [[TMP12]]
+; CHECK-NEXT:    [[TMP23:%.*]] = fadd fast float [[TMP22]], [[TMP11]]
+; CHECK-NEXT:    [[TMP24:%.*]] = fadd fast float [[TMP23]], [[TMP10]]
+; CHECK-NEXT:    [[TMP25:%.*]] = fadd fast float [[TMP24]], [[TMP9]]
+; CHECK-NEXT:    [[TMP26:%.*]] = fadd fast float [[TMP25]], [[TMP8]]
+; CHECK-NEXT:    [[TMP27:%.*]] = fadd fast float [[TMP26]], [[TMP7]]
+; CHECK-NEXT:    [[TMP28:%.*]] = fadd fast float [[TMP27]], [[TMP6]]
+; CHECK-NEXT:    [[TMP29:%.*]] = fadd fast float [[TMP28]], [[TMP5]]
+; CHECK-NEXT:    [[TMP30:%.*]] = fadd fast float [[TMP29]], [[TMP4]]
+; CHECK-NEXT:    [[TMP31:%.*]] = fadd fast float [[TMP30]], [[TMP3]]
+; CHECK-NEXT:    [[TMP32:%.*]] = fadd fast float [[TMP31]], [[TMP2]]
+; CHECK-NEXT:    [[TMP33:%.*]] = fadd fast float [[TMP32]], [[TMP1]]
+; CHECK-NEXT:    [[TMP34:%.*]] = fadd fast float [[TMP33]], [[TMP0]]
+; CHECK-NEXT:    [[ADD_47:%.*]] = fadd fast float undef, [[ADD_46]]
+; CHECK-NEXT:    ret float [[TMP34]]
+;
+  entry:
+  %0 = load float, float* %x, align 4
+  %arrayidx.1 = getelementptr inbounds float, float* %x, i64 1
+  %1 = load float, float* %arrayidx.1, align 4
+  %add.1 = fadd fast float %1, %0
+  %arrayidx.2 = getelementptr inbounds float, float* %x, i64 2
+  %2 = load float, float* %arrayidx.2, align 4
+  %add.2 = fadd fast float %2, %add.1
+  %arrayidx.3 = getelementptr inbounds float, float* %x, i64 3
+  %3 = load float, float* %arrayidx.3, align 4
+  %add.3 = fadd fast float %3, %add.2
+  %arrayidx.4 = getelementptr inbounds float, float* %x, i64 4
+  %4 = load float, float* %arrayidx.4, align 4
+  %add.4 = fadd fast float %4, %add.3
+  %arrayidx.5 = getelementptr inbounds float, float* %x, i64 5
+  %5 = load float, float* %arrayidx.5, align 4
+  %add.5 = fadd fast float %5, %add.4
+  %arrayidx.6 = getelementptr inbounds float, float* %x, i64 6
+  %6 = load float, float* %arrayidx.6, align 4
+  %add.6 = fadd fast float %6, %add.5
+  %arrayidx.7 = getelementptr inbounds float, float* %x, i64 7
+  %7 = load float, float* %arrayidx.7, align 4
+  %add.7 = fadd fast float %7, %add.6
+  %arrayidx.8 = getelementptr inbounds float, float* %x, i64 8
+  %8 = load float, float* %arrayidx.8, align 4
+  %add.8 = fadd fast float %8, %add.7
+  %arrayidx.9 = getelementptr inbounds float, float* %x, i64 9
+  %9 = load float, float* %arrayidx.9, align 4
+  %add.9 = fadd fast float %9, %add.8
+  %arrayidx.10 = getelementptr inbounds float, float* %x, i64 10
+  %10 = load float, float* %arrayidx.10, align 4
+  %add.10 = fadd fast float %10, %add.9
+  %arrayidx.11 = getelementptr inbounds float, float* %x, i64 11
+  %11 = load float, float* %arrayidx.11, align 4
+  %add.11 = fadd fast float %11, %add.10
+  %arrayidx.12 = getelementptr inbounds float, float* %x, i64 12
+  %12 = load float, float* %arrayidx.12, align 4
+  %add.12 = fadd fast float %12, %add.11
+  %arrayidx.13 = getelementptr inbounds float, float* %x, i64 13
+  %13 = load float, float* %arrayidx.13, align 4
+  %add.13 = fadd fast float %13, %add.12
+  %arrayidx.14 = getelementptr inbounds float, float* %x, i64 14
+  %14 = load float, float* %arrayidx.14, align 4
+  %add.14 = fadd fast float %14, %add.13
+  %arrayidx.15 = getelementptr inbounds float, float* %x, i64 15
+  %15 = load float, float* %arrayidx.15, align 4
+  %add.15 = fadd fast float %15, %add.14
+  %arrayidx.16 = getelementptr inbounds float, float* %x, i64 16
+  %16 = load float, float* %arrayidx.16, align 4
+  %add.16 = fadd fast float %16, %add.15
+  %arrayidx.17 = getelementptr inbounds float, float* %x, i64 17
+  %17 = load float, float* %arrayidx.17, align 4
+  %add.17 = fadd fast float %17, %add.16
+  %arrayidx.18 = getelementptr inbounds float, float* %x, i64 18
+  %18 = load float, float* %arrayidx.18, align 4
+  %add.18 = fadd fast float %18, %add.17
+  %arrayidx.19 = getelementptr inbounds float, float* %x, i64 19
+  %19 = load float, float* %arrayidx.19, align 4
+  %add.19 = fadd fast float %19, %add.18
+  %arrayidx.20 = getelementptr inbounds float, float* %x, i64 20
+  %20 = load float, float* %arrayidx.20, align 4
+  %add.20 = fadd fast float %20, %add.19
+  %arrayidx.21 = getelementptr inbounds float, float* %x, i64 21
+  %21 = load float, float* %arrayidx.21, align 4
+  %add.21 = fadd fast float %21, %add.20
+  %arrayidx.22 = getelementptr inbounds float, float* %x, i64 22
+  %22 = load float, float* %arrayidx.22, align 4
+  %add.22 = fadd fast float %22, %add.21
+  %arrayidx.23 = getelementptr inbounds float, float* %x, i64 23
+  %23 = load float, float* %arrayidx.23, align 4
+  %add.23 = fadd fast float %23, %add.22
+  %arrayidx.24 = getelementptr inbounds float, float* %x, i64 24
+  %24 = load float, float* %arrayidx.24, align 4
+  %add.24 = fadd fast float %24, %add.23
+  %arrayidx.25 = getelementptr inbounds float, float* %x, i64 25
+  %25 = load float, float* %arrayidx.25, align 4
+  %add.25 = fadd fast float %25, %add.24
+  %arrayidx.26 = getelementptr inbounds float, float* %x, i64 26
+  %26 = load float, float* %arrayidx.26, align 4
+  %add.26 = fadd fast float %26, %add.25
+  %arrayidx.27 = getelementptr inbounds float, float* %x, i64 27
+  %27 = load float, float* %arrayidx.27, align 4
+  %add.27 = fadd fast float %27, %add.26
+  %arrayidx.28 = getelementptr inbounds float, float* %x, i64 28
+  %28 = load float, float* %arrayidx.28, align 4
+  %add.28 = fadd fast float %28, %add.27
+  %arrayidx.29 = getelementptr inbounds float, float* %x, i64 29
+  %29 = load float, float* %arrayidx.29, align 4
+  %add.29 = fadd fast float %29, %add.28
+  %arrayidx.30 = getelementptr inbounds float, float* %x, i64 30
+  %30 = load float, float* %arrayidx.30, align 4
+  %add.30 = fadd fast float %30, %add.29
+  %arrayidx.31 = getelementptr inbounds float, float* %x, i64 31
+  %31 = load float, float* %arrayidx.31, align 4
+  %add.31 = fadd fast float %31, %add.30
+  %arrayidx.32 = getelementptr inbounds float, float* %x, i64 32
+  %32 = load float, float* %arrayidx.32, align 4
+  %add.32 = fadd fast float %32, %add.31
+  %arrayidx.33 = getelementptr inbounds float, float* %x, i64 33
+  %33 = load float, float* %arrayidx.33, align 4
+  %add.33 = fadd fast float %33, %add.32
+  %arrayidx.34 = getelementptr inbounds float, float* %x, i64 34
+  %34 = load float, float* %arrayidx.34, align 4
+  %add.34 = fadd fast float %34, %add.33
+  %arrayidx.35 = getelementptr inbounds float, float* %x, i64 35
+  %35 = load float, float* %arrayidx.35, align 4
+  %add.35 = fadd fast float %35, %add.34
+  %arrayidx.36 = getelementptr inbounds float, float* %x, i64 36
+  %36 = load float, float* %arrayidx.36, align 4
+  %add.36 = fadd fast float %36, %add.35
+  %arrayidx.37 = getelementptr inbounds float, float* %x, i64 37
+  %37 = load float, float* %arrayidx.37, align 4
+  %add.37 = fadd fast float %37, %add.36
+  %arrayidx.38 = getelementptr inbounds float, float* %x, i64 38
+  %38 = load float, float* %arrayidx.38, align 4
+  %add.38 = fadd fast float %38, %add.37
+  %arrayidx.39 = getelementptr inbounds float, float* %x, i64 39
+  %39 = load float, float* %arrayidx.39, align 4
+  %add.39 = fadd fast float %39, %add.38
+  %arrayidx.40 = getelementptr inbounds float, float* %x, i64 40
+  %40 = load float, float* %arrayidx.40, align 4
+  %add.40 = fadd fast float %40, %add.39
+  %arrayidx.41 = getelementptr inbounds float, float* %x, i64 41
+  %41 = load float, float* %arrayidx.41, align 4
+  %add.41 = fadd fast float %41, %add.40
+  %arrayidx.42 = getelementptr inbounds float, float* %x, i64 42
+  %42 = load float, float* %arrayidx.42, align 4
+  %add.42 = fadd fast float %42, %add.41
+  %arrayidx.43 = getelementptr inbounds float, float* %x, i64 43
+  %43 = load float, float* %arrayidx.43, align 4
+  %add.43 = fadd fast float %43, %add.42
+  %arrayidx.44 = getelementptr inbounds float, float* %x, i64 44
+  %44 = load float, float* %arrayidx.44, align 4
+  %add.44 = fadd fast float %44, %add.43
+  %arrayidx.45 = getelementptr inbounds float, float* %x, i64 45
+  %45 = load float, float* %arrayidx.45, align 4
+  %add.45 = fadd fast float %45, %add.44
+  %arrayidx.46 = getelementptr inbounds float, float* %x, i64 46
+  %46 = load float, float* %arrayidx.46, align 4
+  %add.46 = fadd fast float %46, %add.45
+  %arrayidx.47 = getelementptr inbounds float, float* %x, i64 47
+  %47 = load float, float* %arrayidx.47, align 4
+  %add.47 = fadd fast float %47, %add.46
+  ret float %add.47
+}
+
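
As the CHECK lines encode, the vectorizer is expected to handle the
non-power-of-2 length by splitting the reduction. Schematically (hsum here
denotes the horizontal shuffle reduction; it is notation, not an intrinsic):

    f(x) = x[0] + ... + x[15]                    ; 16 scalar loads/fadds
         + hsum(<32 x float> load of x[16..47])  ; 1 vector load, then
                                                 ; log2(32) = 5 shuffle+fadd
                                                 ; steps (RDX_SHUF*/BIN_RDX*)

The extracted vector result ([[TMP18]]) is folded back together with the 16
scalar loads ([[TMP19]] through [[TMP34]]), and the original scalar fadd
chain over elements 16..47 survives only as dead adds of undef
([[ADD_16]]..[[ADD_47]]) for later cleanup.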