[llvm] fca9f70 - [AArch64] Add some simple phase ordering tests for interleaving at various factors. NFC

David Green via llvm-commits llvm-commits at lists.llvm.org
Sun Aug 3 06:52:04 PDT 2025


Author: David Green
Date: 2025-08-03T14:51:59+01:00
New Revision: fca9f70e420899b6a61c6d9b46b3e8479d3a431e

URL: https://github.com/llvm/llvm-project/commit/fca9f70e420899b6a61c6d9b46b3e8479d3a431e
DIFF: https://github.com/llvm/llvm-project/commit/fca9f70e420899b6a61c6d9b46b3e8479d3a431e.diff

LOG: [AArch64] Add some simple phase ordering tests for interleaving at various factors. NFC
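Each function pair in the new file appears to be unoptimized, -O0 style IR for the same small kernel at a different interleave factor, which the phase-ordering pipeline is then expected to vectorize and interleave. A minimal C sketch of the presumed source (not part of the commit), with F standing in for the factor (2, 3, 4, 6 or 8 in the tests) and fast-math assumed to match the fmul fast/fadd fast flags in the IR:

    #define F 2   /* presumed interleave factor; also 3, 4, 6 and 8 below */

    /* same_opF: each lane of a accumulates the product of the matching
       lanes of b and c, giving an interleaved access at factor F. */
    void same_opF(float *restrict a, float *b, float *c) {
      for (int i = 0; i < 1152; i += F)
        for (int j = 0; j < F; j++)
          a[i + j] += b[i + j] * c[i + j];
    }

    /* same_opF_splat: the c operand is a single scalar, c[0], so the
       vectorizer should splat it rather than load interleaved lanes. */
    void same_opF_splat(float *restrict a, float *b, float *c) {
      for (int i = 0; i < 1152; i += F)
        for (int j = 0; j < F; j++)
          a[i + j] += b[i + j] * c[0];
    }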

Added: 
    llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll

Modified: 
    

Removed: 
    


################################################################################
diff  --git a/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll
new file mode 100644
index 0000000000000..bb6f3e719bb14
--- /dev/null
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/interleave_vec.ll
@@ -0,0 +1,1075 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -passes="default<O3>" -mcpu=neoverse-v2 -S < %s  | FileCheck %s
+
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "aarch64"
+
+define void @same_op2(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op2(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT:    [[TMP0:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[TMP0]]
+; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <8 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[WIDE_VEC15:%.*]] = load <8 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP0]]
+; CHECK-NEXT:    [[WIDE_VEC18:%.*]] = load <8 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[WIDE_VEC21:%.*]] = load <8 x float>, ptr [[TMP4]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT:    [[WIDE_VEC24:%.*]] = load <8 x float>, ptr [[TMP5]], align 4
+; CHECK-NEXT:    [[WIDE_VEC27:%.*]] = load <8 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = fmul fast <8 x float> [[WIDE_VEC18]], [[WIDE_VEC]]
+; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = fadd fast <8 x float> [[WIDE_VEC24]], [[TMP7]]
+; CHECK-NEXT:    store <8 x float> [[INTERLEAVED_VEC]], ptr [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast <8 x float> [[WIDE_VEC21]], [[WIDE_VEC15]]
+; CHECK-NEXT:    [[INTERLEAVED_VEC30:%.*]] = fadd fast <8 x float> [[WIDE_VEC27]], [[TMP8]]
+; CHECK-NEXT:    store <8 x float> [[INTERLEAVED_VEC30]], ptr [[TMP6]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT:    [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 576
+; CHECK-NEXT:    br i1 [[TMP9]], label %[[FOR_END13:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK:       [[FOR_END13]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 2, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc11, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end13
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 2
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %3 = load i32, ptr %i, align 4
+  %4 = load i32, ptr %j, align 4
+  %add = add nsw i32 %3, %4
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
+  %5 = load float, ptr %arrayidx, align 4
+  %6 = load ptr, ptr %b.addr, align 8
+  %7 = load i32, ptr %i, align 4
+  %8 = load i32, ptr %j, align 4
+  %add4 = add nsw i32 %7, %8
+  %idxprom5 = sext i32 %add4 to i64
+  %arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
+  %9 = load float, ptr %arrayidx6, align 4
+  %mul = fmul fast float %5, %9
+  %10 = load ptr, ptr %a.addr, align 8
+  %11 = load i32, ptr %i, align 4
+  %12 = load i32, ptr %j, align 4
+  %add7 = add nsw i32 %11, %12
+  %idxprom8 = sext i32 %add7 to i64
+  %arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
+  %13 = load float, ptr %arrayidx9, align 4
+  %add10 = fadd fast float %13, %mul
+  store float %add10, ptr %arrayidx9, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %14 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %14, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc11
+
+for.inc11:                                        ; preds = %for.end
+  %15 = load i32, ptr %i, align 4
+  %add12 = add nsw i32 %15, 2
+  store i32 %add12, ptr %i, align 4
+  br label %for.cond
+
+for.end13:                                        ; preds = %for.cond
+  ret void
+}
+
+
+define void @same_op2_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op2_splat(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <8 x i32> zeroinitializer
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 1
+; CHECK-NEXT:    [[TMP3:%.*]] = or disjoint i64 [[OFFSET_IDX]], 8
+; CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP3]]
+; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <8 x float>, ptr [[TMP4]], align 4
+; CHECK-NEXT:    [[WIDE_VEC13:%.*]] = load <8 x float>, ptr [[TMP5]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP3]]
+; CHECK-NEXT:    [[WIDE_VEC16:%.*]] = load <8 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT:    [[WIDE_VEC19:%.*]] = load <8 x float>, ptr [[TMP7]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast <8 x float> [[WIDE_VEC]], [[TMP1]]
+; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = fadd fast <8 x float> [[WIDE_VEC16]], [[TMP8]]
+; CHECK-NEXT:    store <8 x float> [[INTERLEAVED_VEC]], ptr [[TMP6]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast <8 x float> [[WIDE_VEC13]], [[TMP2]]
+; CHECK-NEXT:    [[INTERLEAVED_VEC22:%.*]] = fadd fast <8 x float> [[WIDE_VEC19]], [[TMP9]]
+; CHECK-NEXT:    store <8 x float> [[INTERLEAVED_VEC22]], ptr [[TMP7]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT:    [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT]], 576
+; CHECK-NEXT:    br i1 [[TMP10]], label %[[FOR_END11:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
+; CHECK:       [[FOR_END11]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 2, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc9, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end11
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 2
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 0
+  %3 = load float, ptr %arrayidx, align 4
+  %4 = load ptr, ptr %b.addr, align 8
+  %5 = load i32, ptr %i, align 4
+  %6 = load i32, ptr %j, align 4
+  %add = add nsw i32 %5, %6
+  %idxprom = sext i32 %add to i64
+  %arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
+  %7 = load float, ptr %arrayidx4, align 4
+  %mul = fmul fast float %3, %7
+  %8 = load ptr, ptr %a.addr, align 8
+  %9 = load i32, ptr %i, align 4
+  %10 = load i32, ptr %j, align 4
+  %add5 = add nsw i32 %9, %10
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
+  %11 = load float, ptr %arrayidx7, align 4
+  %add8 = fadd fast float %11, %mul
+  store float %add8, ptr %arrayidx7, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %12 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %12, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc9
+
+for.inc9:                                         ; preds = %for.end
+  %13 = load i32, ptr %i, align 4
+  %add10 = add nsw i32 %13, 2
+  store i32 %add10, ptr %i, align 4
+  br label %for.cond
+
+for.end11:                                        ; preds = %for.cond
+  ret void
+}
+
+
+define void @same_op3(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op3(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 3
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <12 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC16:%.*]] = load <12 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC20:%.*]] = load <12 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast <12 x float> [[WIDE_VEC16]], [[WIDE_VEC]]
+; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = fadd fast <12 x float> [[WIDE_VEC20]], [[TMP3]]
+; CHECK-NEXT:    store <12 x float> [[INTERLEAVED_VEC]], ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 384
+; CHECK-NEXT:    br i1 [[TMP4]], label %[[FOR_END13:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK:       [[FOR_END13]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 3, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc11, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end13
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 3
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %3 = load i32, ptr %i, align 4
+  %4 = load i32, ptr %j, align 4
+  %add = add nsw i32 %3, %4
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
+  %5 = load float, ptr %arrayidx, align 4
+  %6 = load ptr, ptr %b.addr, align 8
+  %7 = load i32, ptr %i, align 4
+  %8 = load i32, ptr %j, align 4
+  %add4 = add nsw i32 %7, %8
+  %idxprom5 = sext i32 %add4 to i64
+  %arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
+  %9 = load float, ptr %arrayidx6, align 4
+  %mul = fmul fast float %5, %9
+  %10 = load ptr, ptr %a.addr, align 8
+  %11 = load i32, ptr %i, align 4
+  %12 = load i32, ptr %j, align 4
+  %add7 = add nsw i32 %11, %12
+  %idxprom8 = sext i32 %add7 to i64
+  %arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
+  %13 = load float, ptr %arrayidx9, align 4
+  %add10 = fadd fast float %13, %mul
+  store float %add10, ptr %arrayidx9, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %14 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %14, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc11
+
+for.inc11:                                        ; preds = %for.end
+  %15 = load i32, ptr %i, align 4
+  %add12 = add nsw i32 %15, 3
+  store i32 %add12, ptr %i, align 4
+  br label %for.cond
+
+for.end13:                                        ; preds = %for.cond
+  ret void
+}
+
+
+define void @same_op3_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op3_splat(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 3
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <12 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <12 x float> [[WIDE_VEC]], <12 x float> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+; CHECK-NEXT:    [[STRIDED_VEC12:%.*]] = shufflevector <12 x float> [[WIDE_VEC]], <12 x float> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+; CHECK-NEXT:    [[STRIDED_VEC13:%.*]] = shufflevector <12 x float> [[WIDE_VEC]], <12 x float> poison, <4 x i32> <i32 2, i32 5, i32 8, i32 11>
+; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast <4 x float> [[STRIDED_VEC]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC14:%.*]] = load <12 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[STRIDED_VEC15:%.*]] = shufflevector <12 x float> [[WIDE_VEC14]], <12 x float> poison, <4 x i32> <i32 0, i32 3, i32 6, i32 9>
+; CHECK-NEXT:    [[STRIDED_VEC16:%.*]] = shufflevector <12 x float> [[WIDE_VEC14]], <12 x float> poison, <4 x i32> <i32 1, i32 4, i32 7, i32 10>
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd fast <4 x float> [[STRIDED_VEC15]], [[TMP2]]
+; CHECK-NEXT:    [[TMP5:%.*]] = fmul fast <4 x float> [[STRIDED_VEC12]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT:    [[TMP6:%.*]] = fadd fast <4 x float> [[STRIDED_VEC16]], [[TMP5]]
+; CHECK-NEXT:    [[TMP7:%.*]] = fmul fast <4 x float> [[STRIDED_VEC13]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <4 x float> [[TMP4]], <4 x float> [[TMP6]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP9:%.*]] = shufflevector <12 x float> [[WIDE_VEC14]], <12 x float> poison, <8 x i32> <i32 2, i32 5, i32 8, i32 11, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP10:%.*]] = shufflevector <4 x float> [[TMP7]], <4 x float> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 poison, i32 poison, i32 poison, i32 poison>
+; CHECK-NEXT:    [[TMP11:%.*]] = fadd fast <8 x float> [[TMP9]], [[TMP10]]
+; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x float> [[TMP8]], <8 x float> [[TMP11]], <12 x i32> <i32 0, i32 4, i32 8, i32 1, i32 5, i32 9, i32 2, i32 6, i32 10, i32 3, i32 7, i32 11>
+; CHECK-NEXT:    store <12 x float> [[INTERLEAVED_VEC]], ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], 384
+; CHECK-NEXT:    br i1 [[TMP12]], label %[[FOR_END11:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK:       [[FOR_END11]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 3, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc9, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end11
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 3
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 0
+  %3 = load float, ptr %arrayidx, align 4
+  %4 = load ptr, ptr %b.addr, align 8
+  %5 = load i32, ptr %i, align 4
+  %6 = load i32, ptr %j, align 4
+  %add = add nsw i32 %5, %6
+  %idxprom = sext i32 %add to i64
+  %arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
+  %7 = load float, ptr %arrayidx4, align 4
+  %mul = fmul fast float %3, %7
+  %8 = load ptr, ptr %a.addr, align 8
+  %9 = load i32, ptr %i, align 4
+  %10 = load i32, ptr %j, align 4
+  %add5 = add nsw i32 %9, %10
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
+  %11 = load float, ptr %arrayidx7, align 4
+  %add8 = fadd fast float %11, %mul
+  store float %add8, ptr %arrayidx7, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %12 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %12, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc9
+
+for.inc9:                                         ; preds = %for.end
+  %13 = load i32, ptr %i, align 4
+  %add10 = add nsw i32 %13, 3
+  store i32 %add10, ptr %i, align 4
+  br label %for.cond
+
+for.end11:                                        ; preds = %for.cond
+  ret void
+}
+
+
+define void @same_op4(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op4(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP0:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <16 x float>, ptr [[TMP0]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC17:%.*]] = load <16 x float>, ptr [[TMP1]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC22:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = fmul fast <16 x float> [[WIDE_VEC17]], [[WIDE_VEC]]
+; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = fadd fast <16 x float> [[WIDE_VEC22]], [[TMP3]]
+; CHECK-NEXT:    store <16 x float> [[INTERLEAVED_VEC]], ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 288
+; CHECK-NEXT:    br i1 [[TMP4]], label %[[FOR_END13:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK:       [[FOR_END13]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 4, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc11, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end13
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 4
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %3 = load i32, ptr %i, align 4
+  %4 = load i32, ptr %j, align 4
+  %add = add nsw i32 %3, %4
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
+  %5 = load float, ptr %arrayidx, align 4
+  %6 = load ptr, ptr %b.addr, align 8
+  %7 = load i32, ptr %i, align 4
+  %8 = load i32, ptr %j, align 4
+  %add4 = add nsw i32 %7, %8
+  %idxprom5 = sext i32 %add4 to i64
+  %arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
+  %9 = load float, ptr %arrayidx6, align 4
+  %mul = fmul fast float %5, %9
+  %10 = load ptr, ptr %a.addr, align 8
+  %11 = load i32, ptr %i, align 4
+  %12 = load i32, ptr %j, align 4
+  %add7 = add nsw i32 %11, %12
+  %idxprom8 = sext i32 %add7 to i64
+  %arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
+  %13 = load float, ptr %arrayidx9, align 4
+  %add10 = fadd fast float %13, %mul
+  store float %add10, ptr %arrayidx9, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %14 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %14, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc11
+
+for.inc11:                                        ; preds = %for.end
+  %15 = load i32, ptr %i, align 4
+  %add12 = add nsw i32 %15, 4
+  store i32 %add12, ptr %i, align 4
+  br label %for.cond
+
+for.end13:                                        ; preds = %for.cond
+  ret void
+}
+
+
+define void @same_op4_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op4_splat(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x float> [[BROADCAST_SPLATINSERT]], <4 x float> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP2:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <16 x float>, ptr [[TMP2]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC15:%.*]] = load <16 x float>, ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = fmul fast <16 x float> [[WIDE_VEC]], [[TMP1]]
+; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = fadd fast <16 x float> [[WIDE_VEC15]], [[TMP4]]
+; CHECK-NEXT:    store <16 x float> [[INTERLEAVED_VEC]], ptr [[TMP3]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT:    [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 288
+; CHECK-NEXT:    br i1 [[TMP5]], label %[[FOR_END11:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK:       [[FOR_END11]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 4, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc9, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end11
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 4
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 0
+  %3 = load float, ptr %arrayidx, align 4
+  %4 = load ptr, ptr %b.addr, align 8
+  %5 = load i32, ptr %i, align 4
+  %6 = load i32, ptr %j, align 4
+  %add = add nsw i32 %5, %6
+  %idxprom = sext i32 %add to i64
+  %arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
+  %7 = load float, ptr %arrayidx4, align 4
+  %mul = fmul fast float %3, %7
+  %8 = load ptr, ptr %a.addr, align 8
+  %9 = load i32, ptr %i, align 4
+  %10 = load i32, ptr %j, align 4
+  %add5 = add nsw i32 %9, %10
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
+  %11 = load float, ptr %arrayidx7, align 4
+  %add8 = fadd fast float %11, %mul
+  store float %add8, ptr %arrayidx7, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %12 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %12, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc9
+
+for.inc9:                                         ; preds = %for.end
+  %13 = load i32, ptr %i, align 4
+  %add10 = add nsw i32 %13, 4
+  store i32 %add10, ptr %i, align 4
+  br label %for.cond
+
+for.end11:                                        ; preds = %for.cond
+  ret void
+}
+
+
+define void @same_op6(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op6(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_COND1_PREHEADER:.*]]
+; CHECK:       [[FOR_COND1_PREHEADER]]:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_COND1_PREHEADER]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX6]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast <4 x float> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, ptr [[ARRAYIDX9]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd fast <4 x float> [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    store <4 x float> [[TMP4]], ptr [[ARRAYIDX9]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
+; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[TMP5]]
+; CHECK-NEXT:    [[ARRAYIDX6_4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP5]]
+; CHECK-NEXT:    [[ARRAYIDX9_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP5]]
+; CHECK-NEXT:    [[TMP6:%.*]] = load <2 x float>, ptr [[ARRAYIDX_4]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <2 x float>, ptr [[ARRAYIDX6_4]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast <2 x float> [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load <2 x float>, ptr [[ARRAYIDX9_4]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = fadd fast <2 x float> [[TMP9]], [[TMP8]]
+; CHECK-NEXT:    store <2 x float> [[TMP10]], ptr [[ARRAYIDX9_4]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 6
+; CHECK-NEXT:    [[CMP:%.*]] = icmp samesign ult i64 [[INDVARS_IV]], 1146
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_COND1_PREHEADER]], label %[[FOR_END13:.*]]
+; CHECK:       [[FOR_END13]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 6, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc11, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end13
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 6
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %3 = load i32, ptr %i, align 4
+  %4 = load i32, ptr %j, align 4
+  %add = add nsw i32 %3, %4
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
+  %5 = load float, ptr %arrayidx, align 4
+  %6 = load ptr, ptr %b.addr, align 8
+  %7 = load i32, ptr %i, align 4
+  %8 = load i32, ptr %j, align 4
+  %add4 = add nsw i32 %7, %8
+  %idxprom5 = sext i32 %add4 to i64
+  %arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
+  %9 = load float, ptr %arrayidx6, align 4
+  %mul = fmul fast float %5, %9
+  %10 = load ptr, ptr %a.addr, align 8
+  %11 = load i32, ptr %i, align 4
+  %12 = load i32, ptr %j, align 4
+  %add7 = add nsw i32 %11, %12
+  %idxprom8 = sext i32 %add7 to i64
+  %arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
+  %13 = load float, ptr %arrayidx9, align 4
+  %add10 = fadd fast float %13, %mul
+  store float %add10, ptr %arrayidx9, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %14 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %14, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc11
+
+for.inc11:                                        ; preds = %for.end
+  %15 = load i32, ptr %i, align 4
+  %add12 = add nsw i32 %15, 6
+  store i32 %add12, ptr %i, align 4
+  br label %for.cond
+
+for.end13:                                        ; preds = %for.cond
+  ret void
+}
+
+
+define void @same_op6_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op6_splat(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x float> poison, float [[TMP0]], i64 0
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <4 x float> [[TMP1]], <4 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i64 0
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x float> [[TMP3]], <2 x float> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT:    br label %[[FOR_COND1_PREHEADER:.*]]
+; CHECK:       [[FOR_COND1_PREHEADER]]:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_COND1_PREHEADER]] ]
+; CHECK-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP5:%.*]] = load <4 x float>, ptr [[ARRAYIDX4]], align 4
+; CHECK-NEXT:    [[TMP6:%.*]] = fmul fast <4 x float> [[TMP5]], [[TMP2]]
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX7]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = fadd fast <4 x float> [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    store <4 x float> [[TMP8]], ptr [[ARRAYIDX7]], align 4
+; CHECK-NEXT:    [[TMP9:%.*]] = add nuw nsw i64 [[INDVARS_IV]], 4
+; CHECK-NEXT:    [[ARRAYIDX4_4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP9]]
+; CHECK-NEXT:    [[ARRAYIDX7_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP9]]
+; CHECK-NEXT:    [[TMP10:%.*]] = load <2 x float>, ptr [[ARRAYIDX4_4]], align 4
+; CHECK-NEXT:    [[TMP11:%.*]] = fmul fast <2 x float> [[TMP10]], [[TMP4]]
+; CHECK-NEXT:    [[TMP12:%.*]] = load <2 x float>, ptr [[ARRAYIDX7_4]], align 4
+; CHECK-NEXT:    [[TMP13:%.*]] = fadd fast <2 x float> [[TMP12]], [[TMP11]]
+; CHECK-NEXT:    store <2 x float> [[TMP13]], ptr [[ARRAYIDX7_4]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 6
+; CHECK-NEXT:    [[CMP:%.*]] = icmp samesign ult i64 [[INDVARS_IV]], 1146
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_COND1_PREHEADER]], label %[[FOR_END11:.*]]
+; CHECK:       [[FOR_END11]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 6, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc9, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end11
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 6
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 0
+  %3 = load float, ptr %arrayidx, align 4
+  %4 = load ptr, ptr %b.addr, align 8
+  %5 = load i32, ptr %i, align 4
+  %6 = load i32, ptr %j, align 4
+  %add = add nsw i32 %5, %6
+  %idxprom = sext i32 %add to i64
+  %arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
+  %7 = load float, ptr %arrayidx4, align 4
+  %mul = fmul fast float %3, %7
+  %8 = load ptr, ptr %a.addr, align 8
+  %9 = load i32, ptr %i, align 4
+  %10 = load i32, ptr %j, align 4
+  %add5 = add nsw i32 %9, %10
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
+  %11 = load float, ptr %arrayidx7, align 4
+  %add8 = fadd fast float %11, %mul
+  store float %add8, ptr %arrayidx7, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %12 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %12, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc9
+
+for.inc9:                                         ; preds = %for.end
+  %13 = load i32, ptr %i, align 4
+  %add10 = add nsw i32 %13, 6
+  store i32 %add10, ptr %i, align 4
+  br label %for.cond
+
+for.end11:                                        ; preds = %for.cond
+  ret void
+}
+
+
+define void @same_op8(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op8(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    br label %[[FOR_COND1_PREHEADER:.*]]
+; CHECK:       [[FOR_COND1_PREHEADER]]:
+; CHECK-NEXT:    [[INDVARS_IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDVARS_IV_NEXT:%.*]], %[[FOR_COND1_PREHEADER]] ]
+; CHECK-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[ARRAYIDX9:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[INDVARS_IV]]
+; CHECK-NEXT:    [[TMP0:%.*]] = load <4 x float>, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x float>, ptr [[ARRAYIDX6]], align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = fmul fast <4 x float> [[TMP1]], [[TMP0]]
+; CHECK-NEXT:    [[TMP3:%.*]] = load <4 x float>, ptr [[ARRAYIDX9]], align 4
+; CHECK-NEXT:    [[TMP4:%.*]] = fadd fast <4 x float> [[TMP3]], [[TMP2]]
+; CHECK-NEXT:    store <4 x float> [[TMP4]], ptr [[ARRAYIDX9]], align 4
+; CHECK-NEXT:    [[TMP5:%.*]] = or disjoint i64 [[INDVARS_IV]], 4
+; CHECK-NEXT:    [[ARRAYIDX_4:%.*]] = getelementptr inbounds nuw float, ptr [[C]], i64 [[TMP5]]
+; CHECK-NEXT:    [[ARRAYIDX6_4:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[TMP5]]
+; CHECK-NEXT:    [[ARRAYIDX9_4:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[TMP5]]
+; CHECK-NEXT:    [[TMP6:%.*]] = load <4 x float>, ptr [[ARRAYIDX_4]], align 4
+; CHECK-NEXT:    [[TMP7:%.*]] = load <4 x float>, ptr [[ARRAYIDX6_4]], align 4
+; CHECK-NEXT:    [[TMP8:%.*]] = fmul fast <4 x float> [[TMP7]], [[TMP6]]
+; CHECK-NEXT:    [[TMP9:%.*]] = load <4 x float>, ptr [[ARRAYIDX9_4]], align 4
+; CHECK-NEXT:    [[TMP10:%.*]] = fadd fast <4 x float> [[TMP9]], [[TMP8]]
+; CHECK-NEXT:    store <4 x float> [[TMP10]], ptr [[ARRAYIDX9_4]], align 4
+; CHECK-NEXT:    [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 8
+; CHECK-NEXT:    [[CMP:%.*]] = icmp samesign ult i64 [[INDVARS_IV]], 1144
+; CHECK-NEXT:    br i1 [[CMP]], label %[[FOR_COND1_PREHEADER]], label %[[FOR_END13:.*]]
+; CHECK:       [[FOR_END13]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 8, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc11, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end13
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 8
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %3 = load i32, ptr %i, align 4
+  %4 = load i32, ptr %j, align 4
+  %add = add nsw i32 %3, %4
+  %idxprom = sext i32 %add to i64
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 %idxprom
+  %5 = load float, ptr %arrayidx, align 4
+  %6 = load ptr, ptr %b.addr, align 8
+  %7 = load i32, ptr %i, align 4
+  %8 = load i32, ptr %j, align 4
+  %add4 = add nsw i32 %7, %8
+  %idxprom5 = sext i32 %add4 to i64
+  %arrayidx6 = getelementptr inbounds float, ptr %6, i64 %idxprom5
+  %9 = load float, ptr %arrayidx6, align 4
+  %mul = fmul fast float %5, %9
+  %10 = load ptr, ptr %a.addr, align 8
+  %11 = load i32, ptr %i, align 4
+  %12 = load i32, ptr %j, align 4
+  %add7 = add nsw i32 %11, %12
+  %idxprom8 = sext i32 %add7 to i64
+  %arrayidx9 = getelementptr inbounds float, ptr %10, i64 %idxprom8
+  %13 = load float, ptr %arrayidx9, align 4
+  %add10 = fadd fast float %13, %mul
+  store float %add10, ptr %arrayidx9, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %14 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %14, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc11
+
+for.inc11:                                        ; preds = %for.end
+  %15 = load i32, ptr %i, align 4
+  %add12 = add nsw i32 %15, 8
+  store i32 %add12, ptr %i, align 4
+  br label %for.cond
+
+for.end13:                                        ; preds = %for.cond
+  ret void
+}
+
+
+define void @same_op8_splat(ptr noalias noundef %a, ptr noundef %b, ptr noundef %c) {
+; CHECK-LABEL: define void @same_op8_splat(
+; CHECK-SAME: ptr noalias noundef captures(none) [[A:%.*]], ptr noundef readonly captures(none) [[B:%.*]], ptr noundef readonly captures(none) [[C:%.*]]) local_unnamed_addr #[[ATTR0]] {
+; CHECK-NEXT:  [[ENTRY:.*]]:
+; CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[C]], align 4
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x float> poison, float [[TMP0]], i64 0
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP3:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[TMP4:%.*]] = shufflevector <2 x float> [[BROADCAST_SPLATINSERT]], <2 x float> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    br label %[[VECTOR_BODY:.*]]
+; CHECK:       [[VECTOR_BODY]]:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
+; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds nuw float, ptr [[B]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC:%.*]] = load <16 x float>, ptr [[TMP5]], align 4
+; CHECK-NEXT:    [[STRIDED_VEC:%.*]] = shufflevector <16 x float> [[WIDE_VEC]], <16 x float> poison, <2 x i32> <i32 0, i32 8>
+; CHECK-NEXT:    [[STRIDED_VEC12:%.*]] = shufflevector <16 x float> [[WIDE_VEC]], <16 x float> poison, <2 x i32> <i32 1, i32 9>
+; CHECK-NEXT:    [[STRIDED_VEC13:%.*]] = shufflevector <16 x float> [[WIDE_VEC]], <16 x float> poison, <2 x i32> <i32 2, i32 10>
+; CHECK-NEXT:    [[STRIDED_VEC14:%.*]] = shufflevector <16 x float> [[WIDE_VEC]], <16 x float> poison, <2 x i32> <i32 3, i32 11>
+; CHECK-NEXT:    [[STRIDED_VEC15:%.*]] = shufflevector <16 x float> [[WIDE_VEC]], <16 x float> poison, <2 x i32> <i32 4, i32 12>
+; CHECK-NEXT:    [[STRIDED_VEC16:%.*]] = shufflevector <16 x float> [[WIDE_VEC]], <16 x float> poison, <2 x i32> <i32 5, i32 13>
+; CHECK-NEXT:    [[STRIDED_VEC17:%.*]] = shufflevector <16 x float> [[WIDE_VEC]], <16 x float> poison, <2 x i32> <i32 6, i32 14>
+; CHECK-NEXT:    [[STRIDED_VEC18:%.*]] = shufflevector <16 x float> [[WIDE_VEC]], <16 x float> poison, <2 x i32> <i32 7, i32 15>
+; CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds nuw float, ptr [[A]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT:    [[WIDE_VEC19:%.*]] = load <16 x float>, ptr [[TMP6]], align 4
+; CHECK-NEXT:    [[STRIDED_VEC20:%.*]] = shufflevector <16 x float> [[WIDE_VEC19]], <16 x float> poison, <2 x i32> <i32 0, i32 8>
+; CHECK-NEXT:    [[STRIDED_VEC21:%.*]] = shufflevector <16 x float> [[WIDE_VEC19]], <16 x float> poison, <2 x i32> <i32 1, i32 9>
+; CHECK-NEXT:    [[STRIDED_VEC22:%.*]] = shufflevector <16 x float> [[WIDE_VEC19]], <16 x float> poison, <2 x i32> <i32 2, i32 10>
+; CHECK-NEXT:    [[STRIDED_VEC23:%.*]] = shufflevector <16 x float> [[WIDE_VEC19]], <16 x float> poison, <2 x i32> <i32 3, i32 11>
+; CHECK-NEXT:    [[STRIDED_VEC24:%.*]] = shufflevector <16 x float> [[WIDE_VEC19]], <16 x float> poison, <2 x i32> <i32 4, i32 12>
+; CHECK-NEXT:    [[STRIDED_VEC25:%.*]] = shufflevector <16 x float> [[WIDE_VEC19]], <16 x float> poison, <2 x i32> <i32 5, i32 13>
+; CHECK-NEXT:    [[STRIDED_VEC26:%.*]] = shufflevector <16 x float> [[WIDE_VEC19]], <16 x float> poison, <2 x i32> <i32 6, i32 14>
+; CHECK-NEXT:    [[STRIDED_VEC27:%.*]] = shufflevector <16 x float> [[WIDE_VEC19]], <16 x float> poison, <2 x i32> <i32 7, i32 15>
+; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <2 x float> [[STRIDED_VEC20]], <2 x float> [[STRIDED_VEC21]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP8:%.*]] = shufflevector <2 x float> [[STRIDED_VEC]], <2 x float> [[STRIDED_VEC12]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP9:%.*]] = fmul fast <4 x float> [[TMP8]], [[TMP1]]
+; CHECK-NEXT:    [[TMP10:%.*]] = fadd fast <4 x float> [[TMP7]], [[TMP9]]
+; CHECK-NEXT:    [[TMP11:%.*]] = shufflevector <2 x float> [[STRIDED_VEC22]], <2 x float> [[STRIDED_VEC23]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP12:%.*]] = shufflevector <2 x float> [[STRIDED_VEC13]], <2 x float> [[STRIDED_VEC14]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP13:%.*]] = fmul fast <4 x float> [[TMP12]], [[TMP2]]
+; CHECK-NEXT:    [[TMP14:%.*]] = fadd fast <4 x float> [[TMP11]], [[TMP13]]
+; CHECK-NEXT:    [[TMP15:%.*]] = shufflevector <2 x float> [[STRIDED_VEC24]], <2 x float> [[STRIDED_VEC25]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP16:%.*]] = shufflevector <2 x float> [[STRIDED_VEC15]], <2 x float> [[STRIDED_VEC16]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP17:%.*]] = fmul fast <4 x float> [[TMP16]], [[TMP3]]
+; CHECK-NEXT:    [[TMP18:%.*]] = fadd fast <4 x float> [[TMP15]], [[TMP17]]
+; CHECK-NEXT:    [[TMP19:%.*]] = shufflevector <2 x float> [[STRIDED_VEC26]], <2 x float> [[STRIDED_VEC27]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP20:%.*]] = shufflevector <2 x float> [[STRIDED_VEC17]], <2 x float> [[STRIDED_VEC18]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT:    [[TMP21:%.*]] = fmul fast <4 x float> [[TMP20]], [[TMP4]]
+; CHECK-NEXT:    [[TMP22:%.*]] = fadd fast <4 x float> [[TMP19]], [[TMP21]]
+; CHECK-NEXT:    [[TMP23:%.*]] = shufflevector <4 x float> [[TMP10]], <4 x float> [[TMP14]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    [[TMP24:%.*]] = shufflevector <4 x float> [[TMP18]], <4 x float> [[TMP22]], <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; CHECK-NEXT:    [[INTERLEAVED_VEC:%.*]] = shufflevector <8 x float> [[TMP23]], <8 x float> [[TMP24]], <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+; CHECK-NEXT:    store <16 x float> [[INTERLEAVED_VEC]], ptr [[TMP6]], align 4
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT:    [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 144
+; CHECK-NEXT:    br i1 [[TMP25]], label %[[FOR_END11:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK:       [[FOR_END11]]:
+; CHECK-NEXT:    ret void
+;
+entry:
+  %a.addr = alloca ptr, align 8
+  %b.addr = alloca ptr, align 8
+  %c.addr = alloca ptr, align 8
+  %N = alloca i32, align 4
+  %i = alloca i32, align 4
+  %j = alloca i32, align 4
+  store ptr %a, ptr %a.addr, align 8
+  store ptr %b, ptr %b.addr, align 8
+  store ptr %c, ptr %c.addr, align 8
+  store i32 8, ptr %N, align 4
+  store i32 0, ptr %i, align 4
+  br label %for.cond
+
+for.cond:                                         ; preds = %for.inc9, %entry
+  %0 = load i32, ptr %i, align 4
+  %cmp = icmp slt i32 %0, 1152
+  br i1 %cmp, label %for.body, label %for.end11
+
+for.body:                                         ; preds = %for.cond
+  store i32 0, ptr %j, align 4
+  br label %for.cond1
+
+for.cond1:                                        ; preds = %for.inc, %for.body
+  %1 = load i32, ptr %j, align 4
+  %cmp2 = icmp slt i32 %1, 8
+  br i1 %cmp2, label %for.body3, label %for.end
+
+for.body3:                                        ; preds = %for.cond1
+  %2 = load ptr, ptr %c.addr, align 8
+  %arrayidx = getelementptr inbounds float, ptr %2, i64 0
+  %3 = load float, ptr %arrayidx, align 4
+  %4 = load ptr, ptr %b.addr, align 8
+  %5 = load i32, ptr %i, align 4
+  %6 = load i32, ptr %j, align 4
+  %add = add nsw i32 %5, %6
+  %idxprom = sext i32 %add to i64
+  %arrayidx4 = getelementptr inbounds float, ptr %4, i64 %idxprom
+  %7 = load float, ptr %arrayidx4, align 4
+  %mul = fmul fast float %3, %7
+  %8 = load ptr, ptr %a.addr, align 8
+  %9 = load i32, ptr %i, align 4
+  %10 = load i32, ptr %j, align 4
+  %add5 = add nsw i32 %9, %10
+  %idxprom6 = sext i32 %add5 to i64
+  %arrayidx7 = getelementptr inbounds float, ptr %8, i64 %idxprom6
+  %11 = load float, ptr %arrayidx7, align 4
+  %add8 = fadd fast float %11, %mul
+  store float %add8, ptr %arrayidx7, align 4
+  br label %for.inc
+
+for.inc:                                          ; preds = %for.body3
+  %12 = load i32, ptr %j, align 4
+  %inc = add nsw i32 %12, 1
+  store i32 %inc, ptr %j, align 4
+  br label %for.cond1
+
+for.end:                                          ; preds = %for.cond1
+  br label %for.inc9
+
+for.inc9:                                         ; preds = %for.end
+  %13 = load i32, ptr %i, align 4
+  %add10 = add nsw i32 %13, 8
+  store i32 %add10, ptr %i, align 4
+  br label %for.cond
+
+for.end11:                                        ; preds = %for.cond
+  ret void
+}
+;.
+; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
+; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
+; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"}
+; CHECK: [[LOOP3]] = distinct !{[[LOOP3]], [[META1]], [[META2]]}
+; CHECK: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
+; CHECK: [[LOOP5]] = distinct !{[[LOOP5]], [[META1]], [[META2]]}
+; CHECK: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
+; CHECK: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
+; CHECK: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
+;.