[llvm] 890027b - [RISCV] Add test cases showing failure to use .vf vector operations when splat is in another basic block. NFC

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 20 10:26:22 PDT 2021


Author: Craig Topper
Date: 2021-09-20T10:25:38-07:00
New Revision: 890027b31433311515906633518e1295293ac15c

URL: https://github.com/llvm/llvm-project/commit/890027b31433311515906633518e1295293ac15c
DIFF: https://github.com/llvm/llvm-project/commit/890027b31433311515906633518e1295293ac15c.diff

LOG: [RISCV] Add test cases showing failure to use .vf vector operations when splat is in another basic block. NFC

We should have CGP copy the splats into the same basic block as the
FP operation so that SelectionDAG can fold them.
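For illustration only (not part of this commit), a minimal sketch of the sinking that such a CodeGenPrepare change would perform on one of the tests below. SelectionDAG works one basic block at a time, so a splat defined in the entry block cannot be folded into the FP operation inside the loop; duplicating the splat next to its use would let instruction selection pick e.g. vfadd.vf instead of a hoisted vfmv.v.f plus vfadd.vv. The %ins.sunk/%splat.sunk names are purely illustrative.

  ; Before sinking: the splat lives in the entry block, away from its use.
  entry:
    %ins = insertelement <4 x float> poison, float %x, i32 0
    %splat = shufflevector <4 x float> %ins, <4 x float> poison, <4 x i32> zeroinitializer
    br label %vector.body
  vector.body:
    %v = fadd <4 x float> %wide.load, %splat

  ; After sinking: the splat has been copied into the loop block, so
  ; SelectionDAG can match it as the scalar operand of a .vf instruction.
  vector.body:
    %ins.sunk = insertelement <4 x float> poison, float %x, i32 0
    %splat.sunk = shufflevector <4 x float> %ins.sunk, <4 x float> poison, <4 x i32> zeroinitializer
    %v = fadd <4 x float> %wide.load, %splat.sunk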

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 5c32a011e0d0..ad25e303d381 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-v \
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+experimental-v,+f \
 ; RUN:     -riscv-v-vector-bits-min=128 | FileCheck %s
 
 define void @sink_splat_mul(i32* nocapture %a, i32 signext %x) {
@@ -460,7 +460,7 @@ define void @sink_splat_rsub_scalable(i32* nocapture %a, i32 signext %x) {
 ; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
 ; CHECK-NEXT:    lw a3, 0(a0)
 ; CHECK-NEXT:    mv a4, a2
-; CHECK-NEXT:    addw a2, a3, a1
+; CHECK-NEXT:    subw a2, a1, a3
 ; CHECK-NEXT:    sw a2, 0(a0)
 ; CHECK-NEXT:    addi a2, a4, 1
 ; CHECK-NEXT:    addi a0, a0, 4
@@ -511,7 +511,7 @@ for.body:                                         ; preds = %for.body.preheader,
   %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
   %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
   %11 = load i32, i32* %arrayidx, align 4
-  %add = add i32 %11, %x
+  %add = sub i32 %x, %11
   store i32 %add, i32* %arrayidx, align 4
   %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
   %cmp.not = icmp eq i64 %indvars.iv.next, 1024
@@ -905,4 +905,796 @@ for.body:                                         ; preds = %for.body.preheader,
   br i1 %cmp.not, label %for.cond.cleanup, label %for.body
 }
 
+define void @sink_splat_fmul(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fmul:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:  .LBB14_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vfmul.vv v26, v26, v25
+; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a1, .LBB14_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %a, i64 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fmul <4 x float> %wide.load, %broadcast.splat
+  %3 = bitcast float* %0 to <4 x float>*
+  store <4 x float> %2, <4 x float>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_fdiv(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fdiv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:  .LBB15_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vfdiv.vv v26, v26, v25
+; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a1, .LBB15_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %a, i64 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fdiv <4 x float> %wide.load, %broadcast.splat
+  %3 = bitcast float* %0 to <4 x float>*
+  store <4 x float> %2, <4 x float>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_frdiv(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_frdiv:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:  .LBB16_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vfdiv.vv v26, v25, v26
+; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a1, .LBB16_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %a, i64 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fdiv <4 x float> %broadcast.splat, %wide.load
+  %3 = bitcast float* %0 to <4 x float>*
+  store <4 x float> %2, <4 x float>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_fadd(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fadd:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:  .LBB17_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vfadd.vv v26, v26, v25
+; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a1, .LBB17_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %a, i64 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fadd <4 x float> %wide.load, %broadcast.splat
+  %3 = bitcast float* %0 to <4 x float>*
+  store <4 x float> %2, <4 x float>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_fsub(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fsub:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:  .LBB18_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vfsub.vv v26, v26, v25
+; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a1, .LBB18_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %a, i64 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fsub <4 x float> %wide.load, %broadcast.splat
+  %3 = bitcast float* %0 to <4 x float>*
+  store <4 x float> %2, <4 x float>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_frsub(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_frsub:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    addi a1, zero, 1024
+; CHECK-NEXT:  .LBB19_1: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vle32.v v26, (a0)
+; CHECK-NEXT:    vfsub.vv v26, v25, v26
+; CHECK-NEXT:    vse32.v v26, (a0)
+; CHECK-NEXT:    addi a1, a1, -4
+; CHECK-NEXT:    addi a0, a0, 16
+; CHECK-NEXT:    bnez a1, .LBB19_1
+; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %entry
+  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+  %0 = getelementptr inbounds float, float* %a, i64 %index
+  %1 = bitcast float* %0 to <4 x float>*
+  %wide.load = load <4 x float>, <4 x float>* %1, align 4
+  %2 = fsub <4 x float> %broadcast.splat, %wide.load
+  %3 = bitcast float* %0 to <4 x float>*
+  store <4 x float> %2, <4 x float>* %3, align 4
+  %index.next = add nuw i64 %index, 4
+  %4 = icmp eq i64 %index.next, 1024
+  br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup:                                 ; preds = %vector.body
+  ret void
+}
+
+define void @sink_splat_fmul_scalable(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fmul_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a3, a2, 2
+; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    bgeu a4, a3, .LBB20_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    j .LBB20_5
+; CHECK-NEXT:  .LBB20_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a4, a3
+; CHECK-NEXT:    sub a1, a4, a6
+; CHECK-NEXT:    vsetvli a4, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    mv a4, a0
+; CHECK-NEXT:  .LBB20_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl1re32.v v26, (a4)
+; CHECK-NEXT:    vfmul.vv v26, v26, v25
+; CHECK-NEXT:    vs1r.v v26, (a4)
+; CHECK-NEXT:    add a5, a5, a3
+; CHECK-NEXT:    add a4, a4, a2
+; CHECK-NEXT:    bne a5, a1, .LBB20_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB20_7
+; CHECK-NEXT:  .LBB20_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, a1, -1024
+; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:  .LBB20_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    flw ft1, 0(a0)
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:    fmul.s ft1, ft1, ft0
+; CHECK-NEXT:    fsw ft1, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a1, .LBB20_6
+; CHECK-NEXT:  .LBB20_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 1
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 1
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds float, float* %a, i64 %index
+  %7 = bitcast float* %6 to <vscale x 2 x float>*
+  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %8 = fmul <vscale x 2 x float> %wide.load, %broadcast.splat
+  %9 = bitcast float* %6 to <vscale x 2 x float>*
+  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
+  %11 = load float, float* %arrayidx, align 4
+  %mul = fmul float %11, %x
+  store float %mul, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_fdiv_scalable(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fdiv_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a3, a2, 2
+; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    bgeu a4, a3, .LBB21_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    j .LBB21_5
+; CHECK-NEXT:  .LBB21_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a4, a3
+; CHECK-NEXT:    sub a1, a4, a6
+; CHECK-NEXT:    vsetvli a4, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    mv a4, a0
+; CHECK-NEXT:  .LBB21_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl1re32.v v26, (a4)
+; CHECK-NEXT:    vfdiv.vv v26, v26, v25
+; CHECK-NEXT:    vs1r.v v26, (a4)
+; CHECK-NEXT:    add a5, a5, a3
+; CHECK-NEXT:    add a4, a4, a2
+; CHECK-NEXT:    bne a5, a1, .LBB21_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB21_7
+; CHECK-NEXT:  .LBB21_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, a1, -1024
+; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:  .LBB21_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    flw ft1, 0(a0)
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:    fdiv.s ft1, ft1, ft0
+; CHECK-NEXT:    fsw ft1, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a1, .LBB21_6
+; CHECK-NEXT:  .LBB21_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 1
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 1
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds float, float* %a, i64 %index
+  %7 = bitcast float* %6 to <vscale x 2 x float>*
+  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %8 = fdiv <vscale x 2 x float> %wide.load, %broadcast.splat
+  %9 = bitcast float* %6 to <vscale x 2 x float>*
+  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
+  %11 = load float, float* %arrayidx, align 4
+  %mul = fdiv float %11, %x
+  store float %mul, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_frdiv_scalable(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_frdiv_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a3, a2, 2
+; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    bgeu a4, a3, .LBB22_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    j .LBB22_5
+; CHECK-NEXT:  .LBB22_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a4, a3
+; CHECK-NEXT:    sub a1, a4, a6
+; CHECK-NEXT:    vsetvli a4, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    mv a4, a0
+; CHECK-NEXT:  .LBB22_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl1re32.v v26, (a4)
+; CHECK-NEXT:    vfdiv.vv v26, v25, v26
+; CHECK-NEXT:    vs1r.v v26, (a4)
+; CHECK-NEXT:    add a5, a5, a3
+; CHECK-NEXT:    add a4, a4, a2
+; CHECK-NEXT:    bne a5, a1, .LBB22_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB22_7
+; CHECK-NEXT:  .LBB22_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, a1, -1024
+; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:  .LBB22_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    flw ft1, 0(a0)
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:    fdiv.s ft1, ft0, ft1
+; CHECK-NEXT:    fsw ft1, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a1, .LBB22_6
+; CHECK-NEXT:  .LBB22_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 1
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 1
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds float, float* %a, i64 %index
+  %7 = bitcast float* %6 to <vscale x 2 x float>*
+  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %8 = fdiv <vscale x 2 x float> %broadcast.splat, %wide.load
+  %9 = bitcast float* %6 to <vscale x 2 x float>*
+  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
+  %11 = load float, float* %arrayidx, align 4
+  %mul = fdiv float %x, %11
+  store float %mul, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fadd_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a3, a2, 2
+; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    bgeu a4, a3, .LBB23_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    j .LBB23_5
+; CHECK-NEXT:  .LBB23_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a4, a3
+; CHECK-NEXT:    sub a1, a4, a6
+; CHECK-NEXT:    vsetvli a4, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    mv a4, a0
+; CHECK-NEXT:  .LBB23_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl1re32.v v26, (a4)
+; CHECK-NEXT:    vfadd.vv v26, v26, v25
+; CHECK-NEXT:    vs1r.v v26, (a4)
+; CHECK-NEXT:    add a5, a5, a3
+; CHECK-NEXT:    add a4, a4, a2
+; CHECK-NEXT:    bne a5, a1, .LBB23_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB23_7
+; CHECK-NEXT:  .LBB23_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, a1, -1024
+; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:  .LBB23_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    flw ft1, 0(a0)
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:    fadd.s ft1, ft1, ft0
+; CHECK-NEXT:    fsw ft1, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a1, .LBB23_6
+; CHECK-NEXT:  .LBB23_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 1
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 1
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds float, float* %a, i64 %index
+  %7 = bitcast float* %6 to <vscale x 2 x float>*
+  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %8 = fadd <vscale x 2 x float> %wide.load, %broadcast.splat
+  %9 = bitcast float* %6 to <vscale x 2 x float>*
+  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
+  %11 = load float, float* %arrayidx, align 4
+  %mul = fadd float %11, %x
+  store float %mul, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_fsub_scalable(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fsub_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a3, a2, 2
+; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    bgeu a4, a3, .LBB24_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    j .LBB24_5
+; CHECK-NEXT:  .LBB24_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a4, a3
+; CHECK-NEXT:    sub a1, a4, a6
+; CHECK-NEXT:    vsetvli a4, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    mv a4, a0
+; CHECK-NEXT:  .LBB24_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl1re32.v v26, (a4)
+; CHECK-NEXT:    vfsub.vv v26, v26, v25
+; CHECK-NEXT:    vs1r.v v26, (a4)
+; CHECK-NEXT:    add a5, a5, a3
+; CHECK-NEXT:    add a4, a4, a2
+; CHECK-NEXT:    bne a5, a1, .LBB24_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB24_7
+; CHECK-NEXT:  .LBB24_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, a1, -1024
+; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:  .LBB24_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    flw ft1, 0(a0)
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:    fsub.s ft1, ft1, ft0
+; CHECK-NEXT:    fsw ft1, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a1, .LBB24_6
+; CHECK-NEXT:  .LBB24_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 1
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 1
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds float, float* %a, i64 %index
+  %7 = bitcast float* %6 to <vscale x 2 x float>*
+  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %8 = fsub <vscale x 2 x float> %wide.load, %broadcast.splat
+  %9 = bitcast float* %6 to <vscale x 2 x float>*
+  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
+  %11 = load float, float* %arrayidx, align 4
+  %mul = fsub float %11, %x
+  store float %mul, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+define void @sink_splat_frsub_scalable(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_frsub_scalable:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    srli a3, a2, 2
+; CHECK-NEXT:    addi a4, zero, 1024
+; CHECK-NEXT:    fmv.w.x ft0, a1
+; CHECK-NEXT:    bgeu a4, a3, .LBB25_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    mv a1, zero
+; CHECK-NEXT:    j .LBB25_5
+; CHECK-NEXT:  .LBB25_2: # %vector.ph
+; CHECK-NEXT:    mv a5, zero
+; CHECK-NEXT:    remu a6, a4, a3
+; CHECK-NEXT:    sub a1, a4, a6
+; CHECK-NEXT:    vsetvli a4, zero, e32, m1, ta, mu
+; CHECK-NEXT:    vfmv.v.f v25, ft0
+; CHECK-NEXT:    mv a4, a0
+; CHECK-NEXT:  .LBB25_3: # %vector.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    vl1re32.v v26, (a4)
+; CHECK-NEXT:    vfsub.vv v26, v25, v26
+; CHECK-NEXT:    vs1r.v v26, (a4)
+; CHECK-NEXT:    add a5, a5, a3
+; CHECK-NEXT:    add a4, a4, a2
+; CHECK-NEXT:    bne a5, a1, .LBB25_3
+; CHECK-NEXT:  # %bb.4: # %middle.block
+; CHECK-NEXT:    beqz a6, .LBB25_7
+; CHECK-NEXT:  .LBB25_5: # %for.body.preheader
+; CHECK-NEXT:    addi a2, a1, -1024
+; CHECK-NEXT:    slli a1, a1, 2
+; CHECK-NEXT:    add a0, a0, a1
+; CHECK-NEXT:  .LBB25_6: # %for.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    flw ft1, 0(a0)
+; CHECK-NEXT:    mv a1, a2
+; CHECK-NEXT:    fsub.s ft1, ft0, ft1
+; CHECK-NEXT:    fsw ft1, 0(a0)
+; CHECK-NEXT:    addi a2, a2, 1
+; CHECK-NEXT:    addi a0, a0, 4
+; CHECK-NEXT:    bgeu a2, a1, .LBB25_6
+; CHECK-NEXT:  .LBB25_7: # %for.cond.cleanup
+; CHECK-NEXT:    ret
+entry:
+  %0 = call i64 @llvm.vscale.i64()
+  %1 = shl i64 %0, 1
+  %min.iters.check = icmp ugt i64 %1, 1024
+  br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph:                                        ; preds = %entry
+  %2 = call i64 @llvm.vscale.i64()
+  %3 = shl i64 %2, 1
+  %n.mod.vf = urem i64 1024, %3
+  %n.vec = sub nsw i64 1024, %n.mod.vf
+  %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
+  %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+  %4 = call i64 @llvm.vscale.i64()
+  %5 = shl i64 %4, 1
+  br label %vector.body
+
+vector.body:                                      ; preds = %vector.body, %vector.ph
+  %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+  %6 = getelementptr inbounds float, float* %a, i64 %index
+  %7 = bitcast float* %6 to <vscale x 2 x float>*
+  %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+  %8 = fsub <vscale x 2 x float> %broadcast.splat, %wide.load
+  %9 = bitcast float* %6 to <vscale x 2 x float>*
+  store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+  %index.next = add nuw i64 %index, %5
+  %10 = icmp eq i64 %index.next, %n.vec
+  br i1 %10, label %middle.block, label %vector.body
+
+middle.block:                                     ; preds = %vector.body
+  %cmp.n = icmp eq i64 %n.mod.vf, 0
+  br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader:                               ; preds = %entry, %middle.block
+  %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+  br label %for.body
+
+for.cond.cleanup:                                 ; preds = %for.body, %middle.block
+  ret void
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+  %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
+  %11 = load float, float* %arrayidx, align 4
+  %mul = fsub float %x, %11
+  store float %mul, float* %arrayidx, align 4
+  %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+  %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+  br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
 declare i64 @llvm.vscale.i64()


        

