[llvm] r370506 - [X86] Add test cases for opportunities for machine LICM to unfold broadcasted constant pool loads.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 30 12:26:06 PDT 2019


Author: ctopper
Date: Fri Aug 30 12:26:06 2019
New Revision: 370506

URL: http://llvm.org/viewvc/llvm-project?rev=370506&view=rev
Log:
[X86] Add test cases for opportunities for machine LICM to unfold broadcasted constant pool loads.

MachineLICM is able to unfold loads to move an invariant load out
of a loop, but X86 infrastructure currently lacks the ability to do
this when avx512 embedded broadcasting is used.

This test adds examples for the basic floating point operations,
add, mul, and, or, and xor.

Added:
    llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll

Added: llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll?rev=370506&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll (added)
+++ llvm/trunk/test/CodeGen/X86/avx512-broadcast-unfold.ll Fri Aug 30 12:26:06 2019
@@ -0,0 +1,1844 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-pc-linux -mattr=avx512vl | FileCheck %s
+
+; Test that we can unfold constant pool loads when we're using avx512's
+; ability to fold a broadcast load into an operation.
+
+; 16 x i32 loop adding splat(2). The CHECK lines show vpaddd consuming the
+; constant through a {1to16} broadcast memory operand inside the loop, i.e.
+; the constant-pool load is folded in-loop rather than unfolded and hoisted.
+define void @bcast_unfold_add_v16i32(i32* %arg) {
+; CHECK-LABEL: bcast_unfold_add_v16i32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB0_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB0_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i32, i32* %arg, i64 %tmp
+  %tmp4 = bitcast i32* %tmp3 to <16 x i32>*
+  %tmp5 = load <16 x i32>, <16 x i32>* %tmp4, align 4
+  %tmp6 = add nsw <16 x i32> %tmp5, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+  %tmp7 = bitcast i32* %tmp3 to <16 x i32>*
+  store <16 x i32> %tmp6, <16 x i32>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 16
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 8 x i32 (ymm) variant: vpaddd uses a {1to8} broadcast memory operand inside
+; the loop, so the constant-pool load is not hoisted out.
+define void @bcast_unfold_add_v8i32(i32* %arg) {
+; CHECK-LABEL: bcast_unfold_add_v8i32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB1_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB1_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i32, i32* %arg, i64 %tmp
+  %tmp4 = bitcast i32* %tmp3 to <8 x i32>*
+  %tmp5 = load <8 x i32>, <8 x i32>* %tmp4, align 4
+  %tmp6 = add nsw <8 x i32> %tmp5, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
+  %tmp7 = bitcast i32* %tmp3 to <8 x i32>*
+  store <8 x i32> %tmp6, <8 x i32>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 4 x i32 (xmm) variant: vpaddd uses a {1to4} broadcast memory operand inside
+; the loop, so the constant-pool load is not hoisted out.
+define void @bcast_unfold_add_v4i32(i32* %arg) {
+; CHECK-LABEL: bcast_unfold_add_v4i32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB2_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB2_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i32, i32* %arg, i64 %tmp
+  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
+  %tmp5 = load <4 x i32>, <4 x i32>* %tmp4, align 4
+  %tmp6 = add nsw <4 x i32> %tmp5, <i32 2, i32 2, i32 2, i32 2>
+  %tmp7 = bitcast i32* %tmp3 to <4 x i32>*
+  store <4 x i32> %tmp6, <4 x i32>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 8 x i64 (zmm) variant: vpaddq uses a {1to8} broadcast memory operand inside
+; the loop, so the constant-pool load is not hoisted out.
+define void @bcast_unfold_add_v8i64(i64* %arg) {
+; CHECK-LABEL: bcast_unfold_add_v8i64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB3_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB3_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i64, i64* %arg, i64 %tmp
+  %tmp4 = bitcast i64* %tmp3 to <8 x i64>*
+  %tmp5 = load <8 x i64>, <8 x i64>* %tmp4, align 8
+  %tmp6 = add nsw <8 x i64> %tmp5, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
+  %tmp7 = bitcast i64* %tmp3 to <8 x i64>*
+  store <8 x i64> %tmp6, <8 x i64>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 4 x i64 (ymm) variant: vpaddq uses a {1to4} broadcast memory operand inside
+; the loop, so the constant-pool load is not hoisted out.
+define void @bcast_unfold_add_v4i64(i64* %arg) {
+; CHECK-LABEL: bcast_unfold_add_v4i64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB4_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB4_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i64, i64* %arg, i64 %tmp
+  %tmp4 = bitcast i64* %tmp3 to <4 x i64>*
+  %tmp5 = load <4 x i64>, <4 x i64>* %tmp4, align 8
+  %tmp6 = add nsw <4 x i64> %tmp5, <i64 2, i64 2, i64 2, i64 2>
+  %tmp7 = bitcast i64* %tmp3 to <4 x i64>*
+  store <4 x i64> %tmp6, <4 x i64>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 2 x i64 (xmm) variant: no {1toN} embedded broadcast appears here — vpaddq
+; folds a plain constant-pool memory operand, still inside the loop.
+define void @bcast_unfold_add_v2i64(i64* %arg) {
+; CHECK-LABEL: bcast_unfold_add_v2i64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB5_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vpaddq {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu %xmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB5_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i64, i64* %arg, i64 %tmp
+  %tmp4 = bitcast i64* %tmp3 to <2 x i64>*
+  %tmp5 = load <2 x i64>, <2 x i64>* %tmp4, align 8
+  %tmp6 = add nsw <2 x i64> %tmp5, <i64 2, i64 2>
+  %tmp7 = bitcast i64* %tmp3 to <2 x i64>*
+  store <2 x i64> %tmp6, <2 x i64>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 2
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 16 x i32 loop multiplying by splat(3): vpmulld with a {1to16} broadcast
+; memory operand stays inside the loop (load not unfolded/hoisted).
+define void @bcast_unfold_mul_v16i32(i32* %arg) {
+; CHECK-LABEL: bcast_unfold_mul_v16i32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB6_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vpmulld {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB6_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i32, i32* %arg, i64 %tmp
+  %tmp4 = bitcast i32* %tmp3 to <16 x i32>*
+  %tmp5 = load <16 x i32>, <16 x i32>* %tmp4, align 4
+  %tmp6 = mul nsw <16 x i32> %tmp5, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %tmp7 = bitcast i32* %tmp3 to <16 x i32>*
+  store <16 x i32> %tmp6, <16 x i32>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 16
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 8 x i32 (ymm) multiply-by-3: vpmulld with a {1to8} broadcast memory operand
+; inside the loop.
+define void @bcast_unfold_mul_v8i32(i32* %arg) {
+; CHECK-LABEL: bcast_unfold_mul_v8i32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB7_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vpmulld {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB7_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i32, i32* %arg, i64 %tmp
+  %tmp4 = bitcast i32* %tmp3 to <8 x i32>*
+  %tmp5 = load <8 x i32>, <8 x i32>* %tmp4, align 4
+  %tmp6 = mul nsw <8 x i32> %tmp5, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %tmp7 = bitcast i32* %tmp3 to <8 x i32>*
+  store <8 x i32> %tmp6, <8 x i32>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 4 x i32 (xmm) multiply-by-3: vpmulld with a {1to4} broadcast memory operand
+; inside the loop.
+define void @bcast_unfold_mul_v4i32(i32* %arg) {
+; CHECK-LABEL: bcast_unfold_mul_v4i32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB8_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vpmulld {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB8_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i32, i32* %arg, i64 %tmp
+  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
+  %tmp5 = load <4 x i32>, <4 x i32>* %tmp4, align 4
+  %tmp6 = mul nsw <4 x i32> %tmp5, <i32 3, i32 3, i32 3, i32 3>
+  %tmp7 = bitcast i32* %tmp3 to <4 x i32>*
+  store <4 x i32> %tmp6, <4 x i32>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 8 x i64 multiply-by-3: codegen decomposes x*3 into x + (x+x) via two vpaddq,
+; so no constant-pool load (and no broadcast) is involved in this case.
+define void @bcast_unfold_mul_v8i64(i64* %arg) {
+; CHECK-LABEL: bcast_unfold_mul_v8i64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB9_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vpaddq %zmm0, %zmm0, %zmm1
+; CHECK-NEXT:    vpaddq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB9_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i64, i64* %arg, i64 %tmp
+  %tmp4 = bitcast i64* %tmp3 to <8 x i64>*
+  %tmp5 = load <8 x i64>, <8 x i64>* %tmp4, align 8
+  %tmp6 = mul nsw <8 x i64> %tmp5, <i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3>
+  %tmp7 = bitcast i64* %tmp3 to <8 x i64>*
+  store <8 x i64> %tmp6, <8 x i64>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 4 x i64 multiply-by-3: same add-based decomposition as the v8i64 case —
+; two vpaddq, no constant-pool load.
+define void @bcast_unfold_mul_v4i64(i64* %arg) {
+; CHECK-LABEL: bcast_unfold_mul_v4i64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB10_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vpaddq %ymm0, %ymm0, %ymm1
+; CHECK-NEXT:    vpaddq %ymm1, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB10_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i64, i64* %arg, i64 %tmp
+  %tmp4 = bitcast i64* %tmp3 to <4 x i64>*
+  %tmp5 = load <4 x i64>, <4 x i64>* %tmp4, align 8
+  %tmp6 = mul nsw <4 x i64> %tmp5, <i64 3, i64 3, i64 3, i64 3>
+  %tmp7 = bitcast i64* %tmp3 to <4 x i64>*
+  store <4 x i64> %tmp6, <4 x i64>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 2 x i64 multiply-by-3: add-based decomposition again (two vpaddq); no
+; constant-pool load involved.
+define void @bcast_unfold_mul_v2i64(i64* %arg) {
+; CHECK-LABEL: bcast_unfold_mul_v2i64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB11_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vpaddq %xmm0, %xmm0, %xmm1
+; CHECK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu %xmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB11_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i64, i64* %arg, i64 %tmp
+  %tmp4 = bitcast i64* %tmp3 to <2 x i64>*
+  %tmp5 = load <2 x i64>, <2 x i64>* %tmp4, align 8
+  %tmp6 = mul nsw <2 x i64> %tmp5, <i64 3, i64 3>
+  %tmp7 = bitcast i64* %tmp3 to <2 x i64>*
+  store <2 x i64> %tmp6, <2 x i64>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 2
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 16 x i32 loop OR-ing splat(3): vpord with a {1to16} broadcast memory operand
+; inside the loop (constant-pool load not hoisted).
+define void @bcast_unfold_or_v16i32(i32* %arg) {
+; CHECK-LABEL: bcast_unfold_or_v16i32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB12_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vpord {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB12_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i32, i32* %arg, i64 %tmp
+  %tmp4 = bitcast i32* %tmp3 to <16 x i32>*
+  %tmp5 = load <16 x i32>, <16 x i32>* %tmp4, align 4
+  %tmp6 = or <16 x i32> %tmp5, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %tmp7 = bitcast i32* %tmp3 to <16 x i32>*
+  store <16 x i32> %tmp6, <16 x i32>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 16
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 8 x i32 (ymm) OR with splat(3): vpord with a {1to8} broadcast memory operand
+; inside the loop.
+define void @bcast_unfold_or_v8i32(i32* %arg) {
+; CHECK-LABEL: bcast_unfold_or_v8i32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB13_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vpord {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB13_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i32, i32* %arg, i64 %tmp
+  %tmp4 = bitcast i32* %tmp3 to <8 x i32>*
+  %tmp5 = load <8 x i32>, <8 x i32>* %tmp4, align 4
+  %tmp6 = or <8 x i32> %tmp5, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
+  %tmp7 = bitcast i32* %tmp3 to <8 x i32>*
+  store <8 x i32> %tmp6, <8 x i32>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 4 x i32 (xmm) OR with splat(3): vpord with a {1to4} broadcast memory operand
+; inside the loop.
+define void @bcast_unfold_or_v4i32(i32* %arg) {
+; CHECK-LABEL: bcast_unfold_or_v4i32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB14_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vpord {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB14_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i32, i32* %arg, i64 %tmp
+  %tmp4 = bitcast i32* %tmp3 to <4 x i32>*
+  %tmp5 = load <4 x i32>, <4 x i32>* %tmp4, align 4
+  %tmp6 = or <4 x i32> %tmp5, <i32 3, i32 3, i32 3, i32 3>
+  %tmp7 = bitcast i32* %tmp3 to <4 x i32>*
+  store <4 x i32> %tmp6, <4 x i32>* %tmp7, align 4
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 8 x i64 (zmm) OR with splat(3): vporq with a {1to8} broadcast memory operand
+; inside the loop.
+define void @bcast_unfold_or_v8i64(i64* %arg) {
+; CHECK-LABEL: bcast_unfold_or_v8i64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB15_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vporq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB15_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i64, i64* %arg, i64 %tmp
+  %tmp4 = bitcast i64* %tmp3 to <8 x i64>*
+  %tmp5 = load <8 x i64>, <8 x i64>* %tmp4, align 8
+  %tmp6 = or <8 x i64> %tmp5, <i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3, i64 3>
+  %tmp7 = bitcast i64* %tmp3 to <8 x i64>*
+  store <8 x i64> %tmp6, <8 x i64>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 8
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 4 x i64 (ymm) OR with splat(3): vporq with a {1to4} broadcast memory operand
+; inside the loop.
+define void @bcast_unfold_or_v4i64(i64* %arg) {
+; CHECK-LABEL: bcast_unfold_or_v4i64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB16_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vporq {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB16_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i64, i64* %arg, i64 %tmp
+  %tmp4 = bitcast i64* %tmp3 to <4 x i64>*
+  %tmp5 = load <4 x i64>, <4 x i64>* %tmp4, align 8
+  %tmp6 = or <4 x i64> %tmp5, <i64 3, i64 3, i64 3, i64 3>
+  %tmp7 = bitcast i64* %tmp3 to <4 x i64>*
+  store <4 x i64> %tmp6, <4 x i64>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 4
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 2 x i64 (xmm) OR: lowered to vorps with a plain constant-pool memory operand
+; (no {1toN} embedded broadcast at 128 bits here), still inside the loop.
+define void @bcast_unfold_or_v2i64(i64* %arg) {
+; CHECK-LABEL: bcast_unfold_or_v2i64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB17_1: # %bb2
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 8192(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vorps {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB17_1
+; CHECK-NEXT:  # %bb.2: # %bb10
+; CHECK-NEXT:    retq
+bb:
+  br label %bb2
+
+bb2:                                              ; preds = %bb2, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp8, %bb2 ]
+  %tmp3 = getelementptr inbounds i64, i64* %arg, i64 %tmp
+  %tmp4 = bitcast i64* %tmp3 to <2 x i64>*
+  %tmp5 = load <2 x i64>, <2 x i64>* %tmp4, align 8
+  %tmp6 = or <2 x i64> %tmp5, <i64 3, i64 3>
+  %tmp7 = bitcast i64* %tmp3 to <2 x i64>*
+  store <2 x i64> %tmp6, <2 x i64>* %tmp7, align 8
+  %tmp8 = add i64 %tmp, 2
+  %tmp9 = icmp eq i64 %tmp8, 1024
+  br i1 %tmp9, label %bb10, label %bb2
+
+bb10:                                             ; preds = %bb2
+  ret void
+}
+
+; 16 x float fneg: lowered to vpxord with a {1to16} broadcast constant
+; (presumably the sign-bit mask — constant value not shown in the CHECKs),
+; folded inside the loop rather than hoisted.
+define void @bcast_unfold_fneg_v16f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fneg_v16f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB18_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vpxord {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB18_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <16 x float>*
+  %tmp4 = load <16 x float>, <16 x float>* %tmp3, align 4
+  %tmp5 = fneg <16 x float> %tmp4
+  %tmp6 = bitcast float* %tmp2 to <16 x float>*
+  store <16 x float> %tmp5, <16 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 16
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; 8 x float fneg: vpxord with a {1to8} broadcast constant operand inside the
+; loop (ymm variant of the case above).
+define void @bcast_unfold_fneg_v8f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fneg_v8f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB19_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vpxord {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB19_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <8 x float>*
+  %tmp4 = load <8 x float>, <8 x float>* %tmp3, align 4
+  %tmp5 = fneg <8 x float> %tmp4
+  %tmp6 = bitcast float* %tmp2 to <8 x float>*
+  store <8 x float> %tmp5, <8 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; 4 x float fneg: vpxord with a {1to4} broadcast constant operand inside the
+; loop (xmm variant).
+define void @bcast_unfold_fneg_v4f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fneg_v4f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB20_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vpxord {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB20_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <4 x float>*
+  %tmp4 = load <4 x float>, <4 x float>* %tmp3, align 4
+  %tmp5 = fneg <4 x float> %tmp4
+  %tmp6 = bitcast float* %tmp2 to <4 x float>*
+  store <4 x float> %tmp5, <4 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; 8 x double fneg: vpxorq with a {1to8} broadcast constant operand inside the
+; loop (64-bit element / zmm variant).
+define void @bcast_unfold_fneg_v8f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fneg_v8f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB21_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vpxorq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB21_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <8 x double>*
+  %tmp4 = load <8 x double>, <8 x double>* %tmp3, align 8
+  %tmp5 = fneg <8 x double> %tmp4
+  %tmp6 = bitcast double* %tmp2 to <8 x double>*
+  store <8 x double> %tmp5, <8 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; 4 x double fneg: vpxorq with a {1to4} broadcast constant operand inside the
+; loop (ymm variant).
+define void @bcast_unfold_fneg_v4f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fneg_v4f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB22_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vpxorq {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB22_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <4 x double>*
+  %tmp4 = load <4 x double>, <4 x double>* %tmp3, align 8
+  %tmp5 = fneg <4 x double> %tmp4
+  %tmp6 = bitcast double* %tmp2 to <4 x double>*
+  store <4 x double> %tmp5, <4 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; 2 x double fneg: lowered to vxorps with a plain constant-pool memory operand
+; (no {1toN} embedded broadcast at 128 bits here), still inside the loop.
+define void @bcast_unfold_fneg_v2f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fneg_v2f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB23_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 8192(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vxorps {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB23_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <2 x double>*
+  %tmp4 = load <2 x double>, <2 x double>* %tmp3, align 8
+  %tmp5 = fneg <2 x double> %tmp4
+  %tmp6 = bitcast double* %tmp2 to <2 x double>*
+  store <2 x double> %tmp5, <2 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 2
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; 16 x float fabs: lowered to vpandd with a {1to16} broadcast constant
+; (presumably a mask clearing the sign bit — constant value not shown in the
+; CHECKs), folded inside the loop rather than hoisted.
+define void @bcast_unfold_fabs_v16f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fabs_v16f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB24_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vpandd {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB24_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <16 x float>*
+  %tmp4 = load <16 x float>, <16 x float>* %tmp3, align 4
+  %tmp5 = call <16 x float> @llvm.fabs.v16f32(<16 x float> %tmp4)
+  %tmp6 = bitcast float* %tmp2 to <16 x float>*
+  store <16 x float> %tmp5, <16 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 16
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare <16 x float> @llvm.fabs.v16f32(<16 x float>) #0
+
+; v8f32 fabs (ymm). The abs-mask load is an embedded broadcast ({1to8})
+; on the vpandd inside the loop and is not hoisted.
+define void @bcast_unfold_fabs_v8f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fabs_v8f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB25_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vpandd {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB25_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <8 x float>*
+  %tmp4 = load <8 x float>, <8 x float>* %tmp3, align 4
+  %tmp5 = call <8 x float> @llvm.fabs.v8f32(<8 x float> %tmp4)
+  %tmp6 = bitcast float* %tmp2 to <8 x float>*
+  store <8 x float> %tmp5, <8 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare <8 x float> @llvm.fabs.v8f32(<8 x float>) #0
+
+; v4f32 fabs (xmm). The abs-mask load is an embedded broadcast ({1to4})
+; on the vpandd inside the loop and is not hoisted.
+define void @bcast_unfold_fabs_v4f32(float* %arg) {
+; CHECK-LABEL: bcast_unfold_fabs_v4f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB26_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vpandd {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovdqu %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB26_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <4 x float>*
+  %tmp4 = load <4 x float>, <4 x float>* %tmp3, align 4
+  %tmp5 = call <4 x float> @llvm.fabs.v4f32(<4 x float> %tmp4)
+  %tmp6 = bitcast float* %tmp2 to <4 x float>*
+  store <4 x float> %tmp5, <4 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare <4 x float> @llvm.fabs.v4f32(<4 x float>) #0
+
+; v8f64 fabs (zmm). The 64-bit abs mask is applied via vpandq with an
+; embedded broadcast ({1to8}) inside the loop; the invariant constant
+; load is not hoisted.
+define void @bcast_unfold_fabs_v8f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fabs_v8f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB27_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu64 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vpandq {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovdqu64 %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB27_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <8 x double>*
+  %tmp4 = load <8 x double>, <8 x double>* %tmp3, align 8
+  %tmp5 = call <8 x double> @llvm.fabs.v8f64(<8 x double> %tmp4)
+  %tmp6 = bitcast double* %tmp2 to <8 x double>*
+  store <8 x double> %tmp5, <8 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare <8 x double> @llvm.fabs.v8f64(<8 x double>) #0
+
+; v4f64 fabs (ymm). The abs mask is a {1to4} embedded broadcast on vpandq
+; inside the loop and is not hoisted.
+define void @bcast_unfold_fabs_v4f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fabs_v4f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB28_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovdqu 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vpandq {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovdqu %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB28_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <4 x double>*
+  %tmp4 = load <4 x double>, <4 x double>* %tmp3, align 8
+  %tmp5 = call <4 x double> @llvm.fabs.v4f64(<4 x double> %tmp4)
+  %tmp6 = bitcast double* %tmp2 to <4 x double>*
+  store <4 x double> %tmp5, <4 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare <4 x double> @llvm.fabs.v4f64(<4 x double>) #0
+
+; v2f64 fabs (xmm). No embedded broadcast at 128 bits with a 64-bit
+; element here: the full-width mask constant folds into vandps and is
+; re-read from memory on every iteration.
+define void @bcast_unfold_fabs_v2f64(double* %arg) {
+; CHECK-LABEL: bcast_unfold_fabs_v2f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB29_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 8192(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB29_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <2 x double>*
+  %tmp4 = load <2 x double>, <2 x double>* %tmp3, align 8
+  %tmp5 = call <2 x double> @llvm.fabs.v2f64(<2 x double> %tmp4)
+  %tmp6 = bitcast double* %tmp2 to <2 x double>*
+  store <2 x double> %tmp5, <2 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 2
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare <2 x double> @llvm.fabs.v2f64(<2 x double>) #0
+
+; v16f32 fadd of splatted 2.0. The constant is a {1to16} embedded
+; broadcast on vaddps inside the loop; MachineLICM cannot yet unfold the
+; broadcast load to hoist it.
+define void @bcast_unfold_fadd_v16f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fadd_v16f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB30_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovups %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB30_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <16 x float>*
+  %tmp4 = load <16 x float>, <16 x float>* %tmp3, align 4
+  %tmp5 = fadd <16 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <16 x float>*
+  store <16 x float> %tmp5, <16 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 16
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v8f32 fadd of splatted 2.0 (ymm). Constant stays as a {1to8} embedded
+; broadcast on vaddps inside the loop; not hoisted.
+define void @bcast_unfold_fadd_v8f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fadd_v8f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB31_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovups %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB31_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <8 x float>*
+  %tmp4 = load <8 x float>, <8 x float>* %tmp3, align 4
+  %tmp5 = fadd <8 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <8 x float>*
+  store <8 x float> %tmp5, <8 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v4f32 fadd of splatted 2.0 (xmm). Constant stays as a {1to4} embedded
+; broadcast on vaddps inside the loop; not hoisted.
+define void @bcast_unfold_fadd_v4f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fadd_v4f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB32_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB32_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <4 x float>*
+  %tmp4 = load <4 x float>, <4 x float>* %tmp3, align 4
+  %tmp5 = fadd <4 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <4 x float>*
+  store <4 x float> %tmp5, <4 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v8f64 fadd of splatted 2.0 (zmm). Constant stays as a {1to8} embedded
+; broadcast on vaddpd inside the loop; not hoisted.
+define void @bcast_unfold_fadd_v8f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fadd_v8f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB33_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vaddpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovupd %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB33_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <8 x double>*
+  %tmp4 = load <8 x double>, <8 x double>* %tmp3, align 8
+  %tmp5 = fadd <8 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <8 x double>*
+  store <8 x double> %tmp5, <8 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v4f64 fadd of splatted 2.0 (ymm). Constant stays as a {1to4} embedded
+; broadcast on vaddpd inside the loop; not hoisted.
+define void @bcast_unfold_fadd_v4f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fadd_v4f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB34_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vaddpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovupd %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB34_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <4 x double>*
+  %tmp4 = load <4 x double>, <4 x double>* %tmp3, align 8
+  %tmp5 = fadd <4 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <4 x double>*
+  store <4 x double> %tmp5, <4 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v2f64 fadd of splatted 2.0 (xmm). No embedded broadcast here: the
+; full-width constant folds into vaddpd and is re-read every iteration.
+define void @bcast_unfold_fadd_v2f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fadd_v2f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB35_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vaddpd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT:    vmovupd %xmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB35_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <2 x double>*
+  %tmp4 = load <2 x double>, <2 x double>* %tmp3, align 8
+  %tmp5 = fadd <2 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <2 x double>*
+  store <2 x double> %tmp5, <2 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 2
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v16f32 fsub of splatted 2.0. Codegen turns the subtract into vaddps of
+; the negated constant, still a {1to16} embedded broadcast inside the
+; loop; not hoisted.
+define void @bcast_unfold_fsub_v16f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fsub_v16f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB36_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vaddps {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovups %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB36_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <16 x float>*
+  %tmp4 = load <16 x float>, <16 x float>* %tmp3, align 4
+  %tmp5 = fsub <16 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <16 x float>*
+  store <16 x float> %tmp5, <16 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 16
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v8f32 fsub of splatted 2.0 (ymm); emitted as vaddps of the negated
+; constant with a {1to8} embedded broadcast inside the loop; not hoisted.
+define void @bcast_unfold_fsub_v8f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fsub_v8f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB37_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovups %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB37_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <8 x float>*
+  %tmp4 = load <8 x float>, <8 x float>* %tmp3, align 4
+  %tmp5 = fsub <8 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <8 x float>*
+  store <8 x float> %tmp5, <8 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v4f32 fsub of splatted 2.0 (xmm); emitted as vaddps of the negated
+; constant with a {1to4} embedded broadcast inside the loop; not hoisted.
+define void @bcast_unfold_fsub_v4f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fsub_v4f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB38_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB38_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <4 x float>*
+  %tmp4 = load <4 x float>, <4 x float>* %tmp3, align 4
+  %tmp5 = fsub <4 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <4 x float>*
+  store <4 x float> %tmp5, <4 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v8f64 fsub of splatted 2.0 (zmm); emitted as vaddpd of the negated
+; constant with a {1to8} embedded broadcast inside the loop; not hoisted.
+define void @bcast_unfold_fsub_v8f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fsub_v8f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB39_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vaddpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovupd %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB39_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <8 x double>*
+  %tmp4 = load <8 x double>, <8 x double>* %tmp3, align 8
+  %tmp5 = fsub <8 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <8 x double>*
+  store <8 x double> %tmp5, <8 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v4f64 fsub of splatted 2.0 (ymm); emitted as vaddpd of the negated
+; constant with a {1to4} embedded broadcast inside the loop; not hoisted.
+define void @bcast_unfold_fsub_v4f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fsub_v4f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB40_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vaddpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovupd %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB40_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <4 x double>*
+  %tmp4 = load <4 x double>, <4 x double>* %tmp3, align 8
+  %tmp5 = fsub <4 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <4 x double>*
+  store <4 x double> %tmp5, <4 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v2f64 fsub of splatted 2.0 (xmm); emitted as vaddpd of the negated
+; full-width constant (no broadcast at 128 bits), re-read each iteration.
+define void @bcast_unfold_fsub_v2f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fsub_v2f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB41_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vaddpd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT:    vmovupd %xmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB41_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <2 x double>*
+  %tmp4 = load <2 x double>, <2 x double>* %tmp3, align 8
+  %tmp5 = fsub <2 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <2 x double>*
+  store <2 x double> %tmp5, <2 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 2
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v16f32 fmul by splatted 2.0. The multiply is strength-reduced to
+; vaddps %zmm0, %zmm0, so no constant-pool load remains in the loop.
+define void @bcast_unfold_fmul_v16f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fmul_v16f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB42_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vaddps %zmm0, %zmm0, %zmm0
+; CHECK-NEXT:    vmovups %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB42_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <16 x float>*
+  %tmp4 = load <16 x float>, <16 x float>* %tmp3, align 4
+  %tmp5 = fmul <16 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <16 x float>*
+  store <16 x float> %tmp5, <16 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 16
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v8f32 fmul by splatted 2.0 (ymm); strength-reduced to a self-add, so
+; no constant load is left in the loop.
+define void @bcast_unfold_fmul_v8f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fmul_v8f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB43_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vaddps %ymm0, %ymm0, %ymm0
+; CHECK-NEXT:    vmovups %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB43_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <8 x float>*
+  %tmp4 = load <8 x float>, <8 x float>* %tmp3, align 4
+  %tmp5 = fmul <8 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <8 x float>*
+  store <8 x float> %tmp5, <8 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v4f32 fmul by splatted 2.0 (xmm); strength-reduced to a self-add, so
+; no constant load is left in the loop.
+define void @bcast_unfold_fmul_v4f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fmul_v4f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB44_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vaddps %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB44_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <4 x float>*
+  %tmp4 = load <4 x float>, <4 x float>* %tmp3, align 4
+  %tmp5 = fmul <4 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <4 x float>*
+  store <4 x float> %tmp5, <4 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v8f64 fmul by splatted 2.0 (zmm); strength-reduced to a self-add, so
+; no constant load is left in the loop.
+define void @bcast_unfold_fmul_v8f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fmul_v8f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB45_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vaddpd %zmm0, %zmm0, %zmm0
+; CHECK-NEXT:    vmovupd %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB45_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <8 x double>*
+  %tmp4 = load <8 x double>, <8 x double>* %tmp3, align 8
+  %tmp5 = fmul <8 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <8 x double>*
+  store <8 x double> %tmp5, <8 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v4f64 fmul by splatted 2.0 (ymm); strength-reduced to a self-add, so
+; no constant load is left in the loop.
+define void @bcast_unfold_fmul_v4f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fmul_v4f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB46_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vaddpd %ymm0, %ymm0, %ymm0
+; CHECK-NEXT:    vmovupd %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB46_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <4 x double>*
+  %tmp4 = load <4 x double>, <4 x double>* %tmp3, align 8
+  %tmp5 = fmul <4 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <4 x double>*
+  store <4 x double> %tmp5, <4 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; v2f64 fmul by splatted 2.0 (xmm); strength-reduced to a self-add, so
+; no constant load is left in the loop.
+define void @bcast_unfold_fmul_v2f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fmul_v2f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB47_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vaddpd %xmm0, %xmm0, %xmm0
+; CHECK-NEXT:    vmovupd %xmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB47_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <2 x double>*
+  %tmp4 = load <2 x double>, <2 x double>* %tmp3, align 8
+  %tmp5 = fmul <2 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <2 x double>*
+  store <2 x double> %tmp5, <2 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 2
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; fdiv <16 x float> by splat 2.0 over a 1024-float buffer (64 iterations x 16).
+; The vdivps with an embedded broadcast source ({1to16}) stays inside the loop
+; body: this is the missed MachineLICM unfold/hoist opportunity this test
+; exists to document (see the commit message at the top of the file).
+define void @bcast_unfold_fdiv_v16f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fdiv_v16f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB48_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vdivps {{.*}}(%rip){1to16}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovups %zmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB48_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <16 x float>*
+  %tmp4 = load <16 x float>, <16 x float>* %tmp3, align 4
+  %tmp5 = fdiv <16 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <16 x float>*
+  store <16 x float> %tmp5, <16 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 16
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; fdiv <8 x float> by splat 2.0 (128 iterations x 8). The broadcast divisor
+; ({1to8}) remains folded into vdivps inside the loop instead of being
+; hoisted -- the opportunity this test documents.
+define void @bcast_unfold_fdiv_v8f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fdiv_v8f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB49_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vdivps {{.*}}(%rip){1to8}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovups %ymm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB49_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <8 x float>*
+  %tmp4 = load <8 x float>, <8 x float>* %tmp3, align 4
+  %tmp5 = fdiv <8 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <8 x float>*
+  store <8 x float> %tmp5, <8 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; fdiv <4 x float> by splat 2.0 (256 iterations x 4). Broadcast divisor
+; ({1to4}) stays folded into the in-loop vdivps.
+define void @bcast_unfold_fdiv_v4f32(float* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fdiv_v4f32:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-4096, %rax # imm = 0xF000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB50_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovups 4096(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vdivps {{.*}}(%rip){1to4}, %xmm0, %xmm0
+; CHECK-NEXT:    vmovups %xmm0, 4096(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB50_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds float, float* %arg, i64 %tmp
+  %tmp3 = bitcast float* %tmp2 to <4 x float>*
+  %tmp4 = load <4 x float>, <4 x float>* %tmp3, align 4
+  %tmp5 = fdiv <4 x float> %tmp4, <float 2.000000e+00, float 2.000000e+00, float 2.000000e+00, float 2.000000e+00>
+  %tmp6 = bitcast float* %tmp2 to <4 x float>*
+  store <4 x float> %tmp5, <4 x float>* %tmp6, align 4
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; fdiv <8 x double> by splat 2.0 (128 iterations x 8). Broadcast divisor
+; ({1to8}) stays folded into the in-loop vdivpd on zmm.
+define void @bcast_unfold_fdiv_v8f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fdiv_v8f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB51_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %zmm0
+; CHECK-NEXT:    vdivpd {{.*}}(%rip){1to8}, %zmm0, %zmm0
+; CHECK-NEXT:    vmovupd %zmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $64, %rax
+; CHECK-NEXT:    jne .LBB51_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <8 x double>*
+  %tmp4 = load <8 x double>, <8 x double>* %tmp3, align 8
+  %tmp5 = fdiv <8 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <8 x double>*
+  store <8 x double> %tmp5, <8 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 8
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; fdiv <4 x double> by splat 2.0 (256 iterations x 4). Broadcast divisor
+; ({1to4}) stays folded into the in-loop vdivpd on ymm.
+define void @bcast_unfold_fdiv_v4f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fdiv_v4f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB52_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %ymm0
+; CHECK-NEXT:    vdivpd {{.*}}(%rip){1to4}, %ymm0, %ymm0
+; CHECK-NEXT:    vmovupd %ymm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $32, %rax
+; CHECK-NEXT:    jne .LBB52_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    vzeroupper
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <4 x double>*
+  %tmp4 = load <4 x double>, <4 x double>* %tmp3, align 8
+  %tmp5 = fdiv <4 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00, double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <4 x double>*
+  store <4 x double> %tmp5, <4 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 4
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}
+
+; fdiv <2 x double> by splat 2.0 (512 iterations x 2). Note: unlike the wider
+; cases, the divisor here is a plain (%rip) constant-pool load with no {1toN}
+; embedded-broadcast modifier, but it still stays inside the loop.
+define void @bcast_unfold_fdiv_v2f64(double* nocapture %arg) {
+; CHECK-LABEL: bcast_unfold_fdiv_v2f64:
+; CHECK:       # %bb.0: # %bb
+; CHECK-NEXT:    movq $-8192, %rax # imm = 0xE000
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB53_1: # %bb1
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vmovupd 8192(%rdi,%rax), %xmm0
+; CHECK-NEXT:    vdivpd {{.*}}(%rip), %xmm0, %xmm0
+; CHECK-NEXT:    vmovupd %xmm0, 8192(%rdi,%rax)
+; CHECK-NEXT:    addq $16, %rax
+; CHECK-NEXT:    jne .LBB53_1
+; CHECK-NEXT:  # %bb.2: # %bb9
+; CHECK-NEXT:    retq
+bb:
+  br label %bb1
+
+bb1:                                              ; preds = %bb1, %bb
+  %tmp = phi i64 [ 0, %bb ], [ %tmp7, %bb1 ]
+  %tmp2 = getelementptr inbounds double, double* %arg, i64 %tmp
+  %tmp3 = bitcast double* %tmp2 to <2 x double>*
+  %tmp4 = load <2 x double>, <2 x double>* %tmp3, align 8
+  %tmp5 = fdiv <2 x double> %tmp4, <double 2.000000e+00, double 2.000000e+00>
+  %tmp6 = bitcast double* %tmp2 to <2 x double>*
+  store <2 x double> %tmp5, <2 x double>* %tmp6, align 8
+  %tmp7 = add i64 %tmp, 2
+  %tmp8 = icmp eq i64 %tmp7, 1024
+  br i1 %tmp8, label %bb9, label %bb1
+
+bb9:                                              ; preds = %bb1
+  ret void
+}




More information about the llvm-commits mailing list