[llvm] r371362 - [X86] Add broadcast load unfolding tests for vmaxps/pd and vminps/pd

Craig Topper via llvm-commits <llvm-commits at lists.llvm.org>
Sun Sep 8 21:24:57 PDT 2019


Author: ctopper
Date: Sun Sep  8 21:24:57 2019
New Revision: 371362

URL: http://llvm.org/viewvc/llvm-project?rev=371362&view=rev
Log:
[X86] Add broadcast load unfolding tests for vmaxps/pd and vminps/pd
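Each added test loops over 1024 contiguous float or double elements and clamps each one against the constant 2.0 via fcmp+select, which the X86 backend lowers to vmaxps/vmaxpd or vminps/vminpd with the splatted constant folded as a broadcast memory operand (e.g. {1to4}). For orientation only, here is a rough scalar C equivalent of one of the added loops; this is a sketch inferred from the IR in the diff, not part of the commit, and the function name is illustrative:

  /* Scalar form of bcast_unfold_fmax_v4f32 and the related tests: take the
   * max of each element and 2.0f over 1024 contiguous floats. The vectorized
   * IR below expresses the max as fcmp ogt + select on <4 x float> (and wider
   * types), and the min tests use fcmp olt + select instead. */
  void fmax_against_splat(float *arg) {
    for (int i = 0; i < 1024; i++)
      arg[i] = arg[i] > 2.0f ? arg[i] : 2.0f;
  }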

Modified:
    llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll

Modified: llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll?rev=371362&r1=371361&r2=371362&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll (original)
+++ llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll Sun Sep  8 21:24:57 2019
@@ -2074,3 +2074,421 @@ bb1:
 bb10:                                             ; preds = %bb1
   ret void
 }
+
+define void @bcast_unfold_fmax_v4f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fmax_v4f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB60_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vmaxps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB60_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <4 x float>*
+  %tmp4 = load <4 x float>, <4 x float>* %tmp3, align 4
+  %tmp5 = fcmp ogt <4 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = select <4 x i1> %tmp5, <4 x float> %tmp4, <4 x float> <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp7 = bitcast float* %tmp2 to <4 x float>*
+  store <4 x float> %tmp6, <4 x float>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmax_v8f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fmax_v8f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB61_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vmaxps {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovups %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB61_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <8 x float>*
+  %tmp4 = load <8 x float>, <8 x float>* %tmp3, align 4
+  %tmp5 = fcmp ogt <8 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = select <8 x i1> %tmp5, <8 x float> %tmp4, <8 x float> <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp7 = bitcast float* %tmp2 to <8 x float>*
+  store <8 x float> %tmp6, <8 x float>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmax_v16f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fmax_v16f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB62_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vmaxps {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovups %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB62_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <16 x float>*
+  %tmp4 = load <16 x float>, <16 x float>* %tmp3, align 4
+  %tmp5 = fcmp ogt <16 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = select <16 x i1> %tmp5, <16 x float> %tmp4, <16 x float> <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp7 = bitcast float* %tmp2 to <16 x float>*
+  store <16 x float> %tmp6, <16 x float>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 16
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmax_v2f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fmax_v2f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    vmovapd {{.*#+}} xmm0 = [2.0E+0,2.0E+0]
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB63_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm1
+; CHECK-NEXT:    vmaxpd %xmm0, %xmm1, %xmm1
+; CHECK-NEXT:    vmovupd %xmm1, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB63_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <2 x double>*
+  %tmp4 = load <2 x double>, <2 x double>* %tmp3, align 8
+  %tmp5 = fcmp ogt <2 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = select <2 x i1> %tmp5, <2 x double> %tmp4, <2 x double> <double 2.000000e+00, double 2.000000e+00>
+  %tmp7 = bitcast double* %tmp2 to <2 x double>*
+  store <2 x double> %tmp6, <2 x double>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 2
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmax_v4f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fmax_v4f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB64_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vmaxpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovupd %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB64_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <4 x double>*
+  %tmp4 = load <4 x double>, <4 x double>* %tmp3, align 8
+  %tmp5 = fcmp ogt <4 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = select <4 x i1> %tmp5, <4 x double> %tmp4, <4 x double> <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp7 = bitcast double* %tmp2 to <4 x double>*
+  store <4 x double> %tmp6, <4 x double>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmax_v8f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fmax_v8f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB65_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vmaxpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovupd %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB65_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <8 x double>*
+  %tmp4 = load <8 x double>, <8 x double>* %tmp3, align 8
+  %tmp5 = fcmp ogt <8 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = select <8 x i1> %tmp5, <8 x double> %tmp4, <8 x double> <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp7 = bitcast double* %tmp2 to <8 x double>*
+  store <8 x double> %tmp6, <8 x double>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmin_v4f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fmin_v4f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB66_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vminps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB66_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <4 x float>*
+  %tmp4 = load <4 x float>, <4 x float>* %tmp3, align 4
+  %tmp5 = fcmp olt <4 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = select <4 x i1> %tmp5, <4 x float> %tmp4, <4 x float> <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp7 = bitcast float* %tmp2 to <4 x float>*
+  store <4 x float> %tmp6, <4 x float>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmin_v8f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fmin_v8f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB67_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vminps {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovups %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB67_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <8 x float>*
+  %tmp4 = load <8 x float>, <8 x float>* %tmp3, align 4
+  %tmp5 = fcmp olt <8 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = select <8 x i1> %tmp5, <8 x float> %tmp4, <8 x float> <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp7 = bitcast float* %tmp2 to <8 x float>*
+  store <8 x float> %tmp6, <8 x float>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmin_v16f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fmin_v16f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB68_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vminps {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovups %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB68_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <16 x float>*
+  %tmp4 = load <16 x float>, <16 x float>* %tmp3, align 4
+  %tmp5 = fcmp olt <16 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = select <16 x i1> %tmp5, <16 x float> %tmp4, <16 x float> <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp7 = bitcast float* %tmp2 to <16 x float>*
+  store <16 x float> %tmp6, <16 x float>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 16
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmin_v2f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fmin_v2f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    vmovapd {{.*#+}} xmm0 = [2.0E+0,2.0E+0]
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB69_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm1
+; CHECK-NEXT:    vminpd %xmm0, %xmm1, %xmm1
+; CHECK-NEXT:    vmovupd %xmm1, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB69_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <2 x double>*
+  %tmp4 = load <2 x double>, <2 x double>* %tmp3, align 8
+  %tmp5 = fcmp olt <2 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = select <2 x i1> %tmp5, <2 x double> %tmp4, <2 x double> <double 2.000000e+00, double 2.000000e+00>
+  %tmp7 = bitcast double* %tmp2 to <2 x double>*
+  store <2 x double> %tmp6, <2 x double>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 2
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmin_v4f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fmin_v4f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB70_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vminpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovupd %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB70_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <4 x double>*
+  %tmp4 = load <4 x double>, <4 x double>* %tmp3, align 8
+  %tmp5 = fcmp olt <4 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = select <4 x i1> %tmp5, <4 x double> %tmp4, <4 x double> <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp7 = bitcast double* %tmp2 to <4 x double>*
+  store <4 x double> %tmp6, <4 x double>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
+
+define void @bcast_unfold_fmin_v8f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fmin_v8f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB71_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vminpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovupd %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB71_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <8 x double>*
+  %tmp4 = load <8 x double>, <8 x double>* %tmp3, align 8
+  %tmp5 = fcmp olt <8 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = select <8 x i1> %tmp5, <8 x double> %tmp4, <8 x double> <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp7 = bitcast double* %tmp2 to <8 x double>*
+  store <8 x double> %tmp6, <8 x double>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb1
+
+bb10:                                             ; preds = %bb1
+  ret void
+}
