[llvm] f41767d - [LV] Add replicating load/store cost tests for Apple CPUs.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 21 08:37:29 PST 2026
Author: Florian Hahn
Date: 2026-01-21T16:37:12Z
New Revision: f41767db9db41ba3d31b02bb2d116113a27d9009
URL: https://github.com/llvm/llvm-project/commit/f41767db9db41ba3d31b02bb2d116113a27d9009
DIFF: https://github.com/llvm/llvm-project/commit/f41767db9db41ba3d31b02bb2d116113a27d9009.diff
LOG: [LV] Add replicating load/store cost tests for Apple CPUs.
Add dedicated tests to check replicating load/store costs on Apple CPUs.
Added:
llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs-apple.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs-apple.ll b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs-apple.ll
new file mode 100644
index 0000000000000..b439353444409
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/replicating-load-store-costs-apple.ll
@@ -0,0 +1,813 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals none --filter-out-after "scalar.ph:" --version 6
+; RUN: opt -p loop-vectorize -mcpu=apple-m1 -S %s | FileCheck %s
+
+target triple = "arm64-apple-macosx"
+
+define void @replicating_load_used_as_store_addr(ptr noalias %A) {
+; CHECK-LABEL: define void @replicating_load_used_as_store_addr(
+; CHECK-SAME: ptr noalias [[A:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[TMP0]], 1
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[TMP11]], 1
+; CHECK-NEXT: [[TMP16:%.*]] = add i64 [[TMP12]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr ptr, ptr [[A]], i64 [[INDEX]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr ptr, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr ptr, ptr [[A]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr ptr, ptr [[A]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP5:%.*]] = load ptr, ptr [[TMP3]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = load ptr, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP13:%.*]] = load ptr, ptr [[TMP19]], align 8
+; CHECK-NEXT: [[TMP14:%.*]] = load ptr, ptr [[TMP10]], align 8
+; CHECK-NEXT: [[TMP7:%.*]] = trunc i64 [[TMP1]] to i32
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i64 [[TMP2]] to i32
+; CHECK-NEXT: [[TMP17:%.*]] = trunc i64 [[TMP15]] to i32
+; CHECK-NEXT: [[TMP18:%.*]] = trunc i64 [[TMP16]] to i32
+; CHECK-NEXT: store i32 [[TMP7]], ptr [[TMP5]], align 4
+; CHECK-NEXT: store i32 [[TMP8]], ptr [[TMP6]], align 4
+; CHECK-NEXT: store i32 [[TMP17]], ptr [[TMP13]], align 4
+; CHECK-NEXT: store i32 [[TMP18]], ptr [[TMP14]], align 4
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[SCALAR_PH:.*]]
+; CHECK: [[SCALAR_PH]]:
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %iv.next = add i64 %iv, 1
+ %gep.A = getelementptr ptr, ptr %A, i64 %iv
+ %l.p = load ptr, ptr %gep.A, align 8
+ %iv.trunc = trunc i64 %iv.next to i32
+ store i32 %iv.trunc, ptr %l.p, align 4
+ %ec = icmp eq i64 %iv, 100
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+define void @replicating_load_used_as_store_addr_2(ptr noalias %invar.dst, ptr noalias %invar.src, ptr noalias %src) {
+; CHECK-LABEL: define void @replicating_load_used_as_store_addr_2(
+; CHECK-SAME: ptr noalias [[INVAR_DST:%.*]], ptr noalias [[INVAR_SRC:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[INVAR_SRC]], align 4
+; CHECK-NEXT: [[TMP1:%.*]] = sext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i128, ptr [[SRC]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP3:%.*]] = load i32, ptr [[TMP2]], align 4
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[TMP3]], 123
+; CHECK-NEXT: store i32 [[TMP4]], ptr [[INVAR_DST]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
+; CHECK-NEXT: br i1 [[TMP5]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %l.offset = load i32, ptr %invar.src, align 4
+ %offset.ext = sext i32 %l.offset to i64
+ %gep.src = getelementptr i128, ptr %src, i64 %offset.ext
+ %l.v = load i32, ptr %gep.src, align 4
+ %add = add i32 %l.v, 123
+ store i32 %add, ptr %invar.dst, align 8
+ %iv.next = add i64 %iv, 1
+ %exitcond41.not = icmp eq i64 %iv.next, 100
+ br i1 %exitcond41.not, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+
+define void @replicating_load_used_as_store_addr_3(ptr noalias %src, ptr noalias %dst, ptr noalias %invar.dst, i8 %x) {
+; CHECK-LABEL: define void @replicating_load_used_as_store_addr_3(
+; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], ptr noalias [[INVAR_DST:%.*]], i8 [[X:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[TMP0:%.*]] = xor i8 [[X]], 10
+; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP1]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP3:%.*]] = load i8, ptr [[TMP2]], align 1
+; CHECK-NEXT: [[TMP4:%.*]] = zext i8 [[TMP3]] to i32
+; CHECK-NEXT: [[TMP5:%.*]] = xor i32 [[TMP4]], 111
+; CHECK-NEXT: [[TMP6:%.*]] = zext i32 [[TMP4]] to i64
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i8, ptr [[DST]], i64 [[TMP6]]
+; CHECK-NEXT: store i8 0, ptr [[TMP7]], align 1
+; CHECK-NEXT: [[TMP8:%.*]] = trunc i32 [[TMP5]] to i8
+; CHECK-NEXT: store i8 [[TMP8]], ptr [[INVAR_DST]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP9:%.*]] = icmp eq i64 [[INDEX_NEXT]], 100
+; CHECK-NEXT: br i1 [[TMP9]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %xor = xor i8 %x, 10
+ %ext = zext i8 %xor to i64
+ %gep.src = getelementptr i8, ptr %src, i64 %ext
+ %l = load i8, ptr %gep.src, align 1
+ %l.ext = zext i8 %l to i32
+ %xor.2 = xor i32 %l.ext, 111
+ %idx2.ext = zext i32 %l.ext to i64
+ %gep.dst = getelementptr i8, ptr %dst, i64 %idx2.ext
+ store i8 0, ptr %gep.dst, align 1
+ %xor.2.trunc = trunc i32 %xor.2 to i8
+ store i8 %xor.2.trunc, ptr %invar.dst, align 1
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv.next, 100
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+define void @uniform_gep_for_replicating_gep(ptr %dst) {
+; CHECK-LABEL: define void @uniform_gep_for_replicating_gep(
+; CHECK-SAME: ptr [[DST:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i32 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_IND:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[STEP_ADD:%.*]] = add <2 x i32> [[VEC_IND]], splat (i32 2)
+; CHECK-NEXT: [[STEP_ADD_2:%.*]] = add <2 x i32> [[STEP_ADD]], splat (i32 2)
+; CHECK-NEXT: [[STEP_ADD_3:%.*]] = add <2 x i32> [[STEP_ADD_2]], splat (i32 2)
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[INDEX]], 2
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[INDEX]], 4
+; CHECK-NEXT: [[TMP4:%.*]] = add i32 [[INDEX]], 6
+; CHECK-NEXT: [[TMP5:%.*]] = icmp eq <2 x i32> [[VEC_IND]], zeroinitializer
+; CHECK-NEXT: [[TMP3:%.*]] = icmp eq <2 x i32> [[STEP_ADD]], zeroinitializer
+; CHECK-NEXT: [[TMP7:%.*]] = icmp eq <2 x i32> [[STEP_ADD_2]], zeroinitializer
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq <2 x i32> [[STEP_ADD_3]], zeroinitializer
+; CHECK-NEXT: [[TMP8:%.*]] = lshr i32 [[INDEX]], 1
+; CHECK-NEXT: [[TMP9:%.*]] = lshr i32 [[TMP2]], 1
+; CHECK-NEXT: [[TMP20:%.*]] = lshr i32 [[TMP1]], 1
+; CHECK-NEXT: [[TMP10:%.*]] = lshr i32 [[TMP4]], 1
+; CHECK-NEXT: [[TMP11:%.*]] = zext <2 x i1> [[TMP5]] to <2 x i8>
+; CHECK-NEXT: [[TMP6:%.*]] = zext <2 x i1> [[TMP3]] to <2 x i8>
+; CHECK-NEXT: [[TMP13:%.*]] = zext <2 x i1> [[TMP7]] to <2 x i8>
+; CHECK-NEXT: [[TMP23:%.*]] = zext <2 x i1> [[TMP16]] to <2 x i8>
+; CHECK-NEXT: [[TMP14:%.*]] = zext i32 [[TMP8]] to i64
+; CHECK-NEXT: [[TMP15:%.*]] = zext i32 [[TMP9]] to i64
+; CHECK-NEXT: [[TMP17:%.*]] = zext i32 [[TMP20]] to i64
+; CHECK-NEXT: [[TMP27:%.*]] = zext i32 [[TMP10]] to i64
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP17]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr i64, ptr [[DST]], i64 [[TMP27]]
+; CHECK-NEXT: [[TMP22:%.*]] = extractelement <2 x i8> [[TMP11]], i32 1
+; CHECK-NEXT: [[TMP12:%.*]] = extractelement <2 x i8> [[TMP6]], i32 1
+; CHECK-NEXT: [[TMP25:%.*]] = extractelement <2 x i8> [[TMP13]], i32 1
+; CHECK-NEXT: [[TMP26:%.*]] = extractelement <2 x i8> [[TMP23]], i32 1
+; CHECK-NEXT: store i8 [[TMP22]], ptr [[TMP18]], align 1
+; CHECK-NEXT: store i8 [[TMP12]], ptr [[TMP19]], align 1
+; CHECK-NEXT: store i8 [[TMP25]], ptr [[TMP21]], align 1
+; CHECK-NEXT: store i8 [[TMP26]], ptr [[TMP28]], align 1
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i32 [[INDEX]], 8
+; CHECK-NEXT: [[VEC_IND_NEXT]] = add <2 x i32> [[STEP_ADD_3]], splat (i32 2)
+; CHECK-NEXT: [[TMP24:%.*]] = icmp eq i32 [[INDEX_NEXT]], 128
+; CHECK-NEXT: br i1 [[TMP24]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[SCALAR_PH:.*]]
+; CHECK: [[SCALAR_PH]]:
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %c = icmp eq i32 %iv, 0
+ %shift = lshr i32 %iv, 1
+ %ext = zext i1 %c to i8
+ %ext.shift = zext i32 %shift to i64
+ %gep = getelementptr i64, ptr %dst, i64 %ext.shift
+ store i8 %ext, ptr %gep, align 1
+ %iv.next = add i32 %iv, 1
+ %ec = icmp eq i32 %iv, 128
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+define void @test_load_gep_widen_induction(ptr noalias %dst, ptr noalias %dst2) {
+; CHECK-LABEL: define void @test_load_gep_widen_induction(
+; CHECK-SAME: ptr noalias [[DST:%.*]], ptr noalias [[DST2:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP19:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP20:%.*]] = add i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP21:%.*]] = add i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP23:%.*]] = add i64 [[INDEX]], 5
+; CHECK-NEXT: [[TMP24:%.*]] = add i64 [[INDEX]], 6
+; CHECK-NEXT: [[TMP25:%.*]] = add i64 [[INDEX]], 7
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr i128, ptr [[DST]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP6:%.*]] = getelementptr i128, ptr [[DST]], i64 [[TMP19]]
+; CHECK-NEXT: [[TMP26:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP5]], i32 0
+; CHECK-NEXT: [[TMP0:%.*]] = insertelement <2 x ptr> [[TMP26]], ptr [[TMP6]], i32 1
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr i128, ptr [[DST]], i64 [[TMP20]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i128, ptr [[DST]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP27:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP7]], i32 0
+; CHECK-NEXT: [[TMP1:%.*]] = insertelement <2 x ptr> [[TMP27]], ptr [[TMP8]], i32 1
+; CHECK-NEXT: [[TMP9:%.*]] = getelementptr i128, ptr [[DST]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i128, ptr [[DST]], i64 [[TMP23]]
+; CHECK-NEXT: [[TMP18:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP9]], i32 0
+; CHECK-NEXT: [[TMP2:%.*]] = insertelement <2 x ptr> [[TMP18]], ptr [[TMP10]], i32 1
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr i128, ptr [[DST]], i64 [[TMP24]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr i128, ptr [[DST]], i64 [[TMP25]]
+; CHECK-NEXT: [[TMP22:%.*]] = insertelement <2 x ptr> poison, ptr [[TMP11]], i32 0
+; CHECK-NEXT: [[TMP3:%.*]] = insertelement <2 x ptr> [[TMP22]], ptr [[TMP17]], i32 1
+; CHECK-NEXT: store ptr null, ptr [[TMP5]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP6]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP7]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP8]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP9]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP10]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP11]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP17]], align 8
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr ptr, ptr [[DST2]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr ptr, ptr [[TMP12]], i64 2
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr ptr, ptr [[TMP12]], i64 4
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr ptr, ptr [[TMP12]], i64 6
+; CHECK-NEXT: store <2 x ptr> [[TMP0]], ptr [[TMP12]], align 8
+; CHECK-NEXT: store <2 x ptr> [[TMP1]], ptr [[TMP13]], align 8
+; CHECK-NEXT: store <2 x ptr> [[TMP2]], ptr [[TMP14]], align 8
+; CHECK-NEXT: store <2 x ptr> [[TMP3]], ptr [[TMP15]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
+; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[SCALAR_PH:.*]]
+; CHECK: [[SCALAR_PH]]:
+;
+entry:
+ br label %loop
+
+loop: ; preds = %loop, %entry
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.dst.iv = getelementptr i128, ptr %dst, i64 %iv
+ %iv.next = add i64 %iv, 1
+ store ptr null, ptr %gep.dst.iv, align 8
+ %gep.dst2.iv = getelementptr ptr, ptr %dst2, i64 %iv
+ store ptr %gep.dst.iv, ptr %gep.dst2.iv
+ %exitcond.not = icmp eq i64 %iv.next, 100
+ br i1 %exitcond.not, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+define ptr @replicating_store_in_conditional_latch(ptr %p, i32 %n) {
+; CHECK-LABEL: define ptr @replicating_store_in_conditional_latch(
+; CHECK-SAME: ptr [[P:%.*]], i32 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[TMP0:%.*]] = sub i32 0, [[N]]
+; CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP2:%.*]] = lshr i64 [[TMP1]], 1
+; CHECK-NEXT: [[TMP3:%.*]] = add nuw nsw i64 [[TMP2]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ule i64 [[TMP3]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP3]], 4
+; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[N_MOD_VF]], 0
+; CHECK-NEXT: [[TMP5:%.*]] = select i1 [[TMP4]], i64 4, i64 [[N_MOD_VF]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP3]], [[TMP5]]
+; CHECK-NEXT: [[DOTCAST:%.*]] = trunc i64 [[N_VEC]] to i32
+; CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[DOTCAST]], -2
+; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 48
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP7]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 48
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 48
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 96
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 144
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[P]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[NEXT_GEP1:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP9]]
+; CHECK-NEXT: [[NEXT_GEP2:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP10]]
+; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[P]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr i8, ptr [[NEXT_GEP]], i64 24
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[NEXT_GEP1]], i64 24
+; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, ptr [[NEXT_GEP2]], i64 24
+; CHECK-NEXT: [[TMP15:%.*]] = getelementptr i8, ptr [[NEXT_GEP3]], i64 24
+; CHECK-NEXT: store ptr null, ptr [[TMP12]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP13]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP14]], align 8
+; CHECK-NEXT: store ptr null, ptr [[TMP15]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP16]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
+entry:
+ br label %loop.header
+
+loop.header:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop.latch ]
+ %ptr.iv = phi ptr [ %p, %entry ], [ %ptr.iv.next, %loop.latch ]
+ %gep.ptr.iv = getelementptr i8, ptr %ptr.iv, i64 24
+ %c = icmp eq i32 %iv, %n
+ br i1 %c, label %exit, label %loop.latch
+
+loop.latch:
+ %iv.next = add nsw i32 %iv, -2
+ store ptr null, ptr %gep.ptr.iv, align 8
+ %ptr.iv.next = getelementptr i8, ptr %ptr.iv, i64 48
+ br label %loop.header
+
+exit:
+ ret ptr %gep.ptr.iv
+}
+
+declare void @init(ptr)
+
+define void @scalar_store_cost_after_discarding_interleave_group(ptr %dst, i32 %x, ptr %src) {
+; CHECK-LABEL: define void @scalar_store_cost_after_discarding_interleave_group(
+; CHECK-SAME: ptr [[DST:%.*]], i32 [[X:%.*]], ptr [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: [[TEMP1:%.*]] = alloca [64 x i32], align 4
+; CHECK-NEXT: call void @init(ptr [[TEMP1]])
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[TMP21:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[TMP22:%.*]] = load i32, ptr [[TEMP1]], align 4
+; CHECK-NEXT: [[SHR_0:%.*]] = lshr i32 [[X]], 1
+; CHECK-NEXT: [[MUL_0:%.*]] = mul i32 [[X]], -171254
+; CHECK-NEXT: [[SHR_1:%.*]] = lshr i32 [[MUL_0]], 1
+; CHECK-NEXT: [[ADD_0:%.*]] = add i32 [[SHR_0]], [[SHR_1]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr i16, ptr [[DST]], i64 [[TMP21]]
+; CHECK-NEXT: store i16 0, ptr [[TMP30]], align 2
+; CHECK-NEXT: [[GEP_0_1:%.*]] = getelementptr i16, ptr [[DST]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP38:%.*]] = getelementptr i8, ptr [[GEP_0_1]], i64 14
+; CHECK-NEXT: store i16 0, ptr [[TMP38]], align 2
+; CHECK-NEXT: [[ADD_1:%.*]] = add i32 [[ADD_0]], 1
+; CHECK-NEXT: [[SHR_2:%.*]] = lshr i32 [[ADD_1]], 1
+; CHECK-NEXT: [[TMP54:%.*]] = trunc i32 [[SHR_2]] to i16
+; CHECK-NEXT: [[TMP46:%.*]] = getelementptr i8, ptr [[TMP30]], i64 2
+; CHECK-NEXT: store i16 [[TMP54]], ptr [[TMP46]], align 2
+; CHECK-NEXT: [[SUB_0:%.*]] = sub i32 0, [[MUL_0]]
+; CHECK-NEXT: [[SHR_3:%.*]] = lshr i32 [[SUB_0]], 1
+; CHECK-NEXT: [[TMP70:%.*]] = trunc i32 [[SHR_3]] to i16
+; CHECK-NEXT: [[TMP62:%.*]] = getelementptr i8, ptr [[TMP30]], i64 12
+; CHECK-NEXT: store i16 [[TMP70]], ptr [[TMP62]], align 2
+; CHECK-NEXT: [[OR_0:%.*]] = or i32 [[X]], 1
+; CHECK-NEXT: [[ADD_2:%.*]] = add i32 [[OR_0]], 1
+; CHECK-NEXT: [[SHR_4:%.*]] = lshr i32 [[ADD_2]], 1
+; CHECK-NEXT: [[TMP86:%.*]] = trunc i32 [[SHR_4]] to i16
+; CHECK-NEXT: [[TMP78:%.*]] = getelementptr i8, ptr [[TMP30]], i64 4
+; CHECK-NEXT: store i16 [[TMP86]], ptr [[TMP78]], align 2
+; CHECK-NEXT: [[GEP_0_2:%.*]] = getelementptr i16, ptr [[DST]], i64 [[TMP21]]
+; CHECK-NEXT: [[TMP94:%.*]] = getelementptr i8, ptr [[GEP_0_2]], i64 10
+; CHECK-NEXT: store i16 0, ptr [[TMP94]], align 2
+; CHECK-NEXT: [[TRUNC_3:%.*]] = trunc i32 [[TMP22]] to i16
+; CHECK-NEXT: [[OR_1:%.*]] = or i16 [[TRUNC_3]], 1
+; CHECK-NEXT: [[TMP113:%.*]] = add i16 [[OR_1]], 1
+; CHECK-NEXT: [[TMP105:%.*]] = getelementptr i8, ptr [[TMP30]], i64 8
+; CHECK-NEXT: store i16 [[TMP113]], ptr [[TMP105]], align 2
+; CHECK-NEXT: [[TMP121:%.*]] = getelementptr i8, ptr [[TMP30]], i64 6
+; CHECK-NEXT: store i16 0, ptr [[TMP121]], align 2
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[TMP21]], 8
+; CHECK-NEXT: [[EC:%.*]] = icmp ult i64 [[TMP21]], 128
+; CHECK-NEXT: br i1 [[EC]], label %[[LOOP]], label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ %temp1 = alloca [64 x i32], align 4
+ call void @init(ptr %temp1)
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %1 = load i32, ptr %temp1, align 4
+ %shr.0 = lshr i32 %x, 1
+ %mul.0 = mul i32 %x, -171254
+ %shr.1 = lshr i32 %mul.0, 1
+ %add.0 = add i32 %shr.0, %shr.1
+ %gep.0 = getelementptr i16, ptr %dst, i64 %iv
+ store i16 0, ptr %gep.0, align 2
+ %gep.0.1 = getelementptr i16, ptr %dst, i64 %iv
+ %gep.14 = getelementptr i8, ptr %gep.0.1, i64 14
+ store i16 0, ptr %gep.14, align 2
+ %add.1 = add i32 %add.0, 1
+ %shr.2 = lshr i32 %add.1, 1
+ %trunc.0 = trunc i32 %shr.2 to i16
+ %gep.2 = getelementptr i8, ptr %gep.0, i64 2
+ store i16 %trunc.0, ptr %gep.2, align 2
+ %sub.0 = sub i32 0, %mul.0
+ %shr.3 = lshr i32 %sub.0, 1
+ %trunc.1 = trunc i32 %shr.3 to i16
+ %gep.12 = getelementptr i8, ptr %gep.0, i64 12
+ store i16 %trunc.1, ptr %gep.12, align 2
+ %or.0 = or i32 %x, 1
+ %add.2 = add i32 %or.0, 1
+ %shr.4 = lshr i32 %add.2, 1
+ %trunc.2 = trunc i32 %shr.4 to i16
+ %gep.4 = getelementptr i8, ptr %gep.0, i64 4
+ store i16 %trunc.2, ptr %gep.4, align 2
+ %gep.0.2 = getelementptr i16, ptr %dst, i64 %iv
+ %gep.10 = getelementptr i8, ptr %gep.0.2, i64 10
+ store i16 0, ptr %gep.10, align 2
+ %trunc.3 = trunc i32 %1 to i16
+ %or.1 = or i16 %trunc.3, 1
+ %add.3 = add i16 %or.1, 1
+ %gep.8 = getelementptr i8, ptr %gep.0, i64 8
+ store i16 %add.3, ptr %gep.8, align 2
+ %gep.6 = getelementptr i8, ptr %gep.0, i64 6
+ store i16 0, ptr %gep.6, align 2
+ %iv.next = add i64 %iv, 8
+ %ec = icmp ult i64 %iv, 128
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+define void @test_prefer_vector_addressing(ptr %start, ptr %ms, ptr noalias %src) {
+; CHECK-LABEL: define void @test_prefer_vector_addressing(
+; CHECK-SAME: ptr [[START:%.*]], ptr [[MS:%.*]], ptr noalias [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: [[START2:%.*]] = ptrtoint ptr [[START]] to i64
+; CHECK-NEXT: [[MS1:%.*]] = ptrtoint ptr [[MS]] to i64
+; CHECK-NEXT: [[GEP_START:%.*]] = getelementptr i8, ptr [[START]], i64 3
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[START2]], 3
+; CHECK-NEXT: [[UMAX:%.*]] = call i64 @llvm.umax.i64(i64 [[MS1]], i64 [[TMP0]])
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[UMAX]], -3
+; CHECK-NEXT: [[TMP2:%.*]] = sub i64 [[TMP1]], [[START2]]
+; CHECK-NEXT: [[UMIN:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP2]], i64 1)
+; CHECK-NEXT: [[TMP3:%.*]] = sub i64 [[TMP2]], [[UMIN]]
+; CHECK-NEXT: [[TMP4:%.*]] = udiv i64 [[TMP3]], 3
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[UMIN]], [[TMP4]]
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[TMP5]], 1
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP6]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP6]], 4
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP6]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[N_VEC]], 3
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr i8, ptr [[GEP_START]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP9:%.*]] = mul i64 [[N_VEC]], 3
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP9]]
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 3
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 6
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 9
+; CHECK-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[START]], i64 [[OFFSET_IDX]]
+; CHECK-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP11]]
+; CHECK-NEXT: [[NEXT_GEP4:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP12]]
+; CHECK-NEXT: [[NEXT_GEP5:%.*]] = getelementptr i8, ptr [[START]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP14:%.*]] = load i64, ptr [[NEXT_GEP]], align 1, !tbaa [[LONG_LONG_TBAA12:![0-9]+]]
+; CHECK-NEXT: [[TMP15:%.*]] = load i64, ptr [[NEXT_GEP3]], align 1, !tbaa [[LONG_LONG_TBAA12]]
+; CHECK-NEXT: [[TMP16:%.*]] = load i64, ptr [[NEXT_GEP4]], align 1, !tbaa [[LONG_LONG_TBAA12]]
+; CHECK-NEXT: [[TMP17:%.*]] = load i64, ptr [[NEXT_GEP5]], align 1, !tbaa [[LONG_LONG_TBAA12]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP16]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP17]]
+; CHECK-NEXT: store i32 0, ptr [[TMP18]], align 4, !tbaa [[INT_TBAA17:![0-9]+]]
+; CHECK-NEXT: store i32 0, ptr [[TMP19]], align 4, !tbaa [[INT_TBAA17]]
+; CHECK-NEXT: store i32 0, ptr [[TMP20]], align 4, !tbaa [[INT_TBAA17]]
+; CHECK-NEXT: store i32 0, ptr [[TMP21]], align 4, !tbaa [[INT_TBAA17]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP22:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP22]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP19:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP6]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], [[EXIT:label %.*]], label %[[SCALAR_PH]]
+; CHECK: [[SCALAR_PH]]:
+;
+entry:
+ %gep.start = getelementptr i8, ptr %start, i64 3
+ br label %loop
+
+loop:
+ %ptr.iv = phi ptr [ %gep.start, %entry ], [ %ptr.iv.next, %loop ]
+ %recur = phi ptr [ %start, %entry ], [ %ptr.iv, %loop ]
+ %l = load i64, ptr %recur, align 1, !tbaa !0
+ %gep.src = getelementptr i8, ptr %src, i64 %l
+ store i32 0, ptr %gep.src, align 4, !tbaa !5
+ %ptr.iv.next = getelementptr nusw i8, ptr %ptr.iv, i64 3
+ %ec = icmp ult ptr %ptr.iv, %ms
+ br i1 %ec, label %loop, label %exit
+
+exit:
+ ret void
+}
+
+define void @cost_scalar_load_of_address(ptr noalias %src, ptr %dst) {
+; CHECK-LABEL: define void @cost_scalar_load_of_address(
+; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr [[DST:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, %[[ENTRY]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr i32, ptr [[SRC]], i64 [[IV]]
+; CHECK-NEXT: [[L:%.*]] = load i32, ptr [[GEP_SRC]], align 4
+; CHECK-NEXT: [[L_EXT:%.*]] = sext i32 [[L]] to i64
+; CHECK-NEXT: [[GEP_DST:%.*]] = getelementptr i32, ptr [[DST]], i64 [[L_EXT]]
+; CHECK-NEXT: store i32 0, ptr [[GEP_DST]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; CHECK-NEXT: [[EC:%.*]] = icmp eq i64 [[IV]], 8
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep.src = getelementptr i32, ptr %src, i64 %iv
+ %l = load i32, ptr %gep.src, align 4
+ %l.ext = sext i32 %l to i64
+ %gep.dst = getelementptr i32, ptr %dst, i64 %l.ext
+ store i32 0, ptr %gep.dst, align 4
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv, 8
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+%t = type { [3 x double] }
+%t.2 = type { [ 64 x double ] }
+
+define double @test_scalarization_cost_for_load_of_address(ptr %src.0, ptr %src.1, ptr %src.2) {
+; CHECK-LABEL: define double @test_scalarization_cost_for_load_of_address(
+; CHECK-SAME: ptr [[SRC_0:%.*]], ptr [[SRC_1:%.*]], ptr [[SRC_2:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi double [ 3.000000e+00, %[[VECTOR_PH]] ], [ [[TMP21:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 1
+; CHECK-NEXT: [[GEP_0:%.*]] = getelementptr [[T:%.*]], ptr [[SRC_0]], i64 [[IV]]
+; CHECK-NEXT: [[WIDE_VEC:%.*]] = load <6 x double>, ptr [[GEP_0]], align 8
+; CHECK-NEXT: [[STRIDED_VEC:%.*]] = shufflevector <6 x double> [[WIDE_VEC]], <6 x double> poison, <2 x i32> <i32 0, i32 3>
+; CHECK-NEXT: [[STRIDED_VEC1:%.*]] = shufflevector <6 x double> [[WIDE_VEC]], <6 x double> poison, <2 x i32> <i32 1, i32 4>
+; CHECK-NEXT: [[STRIDED_VEC2:%.*]] = shufflevector <6 x double> [[WIDE_VEC]], <6 x double> poison, <2 x i32> <i32 2, i32 5>
+; CHECK-NEXT: [[TMP3:%.*]] = fmul <2 x double> [[STRIDED_VEC]], splat (double 3.000000e+00)
+; CHECK-NEXT: [[TMP4:%.*]] = fmul <2 x double> [[STRIDED_VEC1]], splat (double 3.000000e+00)
+; CHECK-NEXT: [[TMP5:%.*]] = fmul <2 x double> [[STRIDED_VEC2]], splat (double 3.000000e+00)
+; CHECK-NEXT: [[TMP6:%.*]] = fadd <2 x double> [[TMP3]], [[TMP4]]
+; CHECK-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], [[TMP5]]
+; CHECK-NEXT: [[GEP_SRC:%.*]] = getelementptr double, ptr [[SRC_1]], i64 [[IV]]
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[GEP_SRC]], align 8
+; CHECK-NEXT: [[TMP9:%.*]] = fmul <2 x double> [[TMP7]], [[WIDE_LOAD]]
+; CHECK-NEXT: [[GEP_SRC_2:%.*]] = getelementptr [[T_2:%.*]], ptr [[SRC_2]], i64 [[IV]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr [[T_2]], ptr [[SRC_2]], i64 [[TMP1]]
+; CHECK-NEXT: [[GEP_72:%.*]] = getelementptr i8, ptr [[GEP_SRC_2]], i64 72
+; CHECK-NEXT: [[TMP13:%.*]] = getelementptr i8, ptr [[TMP11]], i64 72
+; CHECK-NEXT: [[L_P_2:%.*]] = load ptr, ptr [[GEP_72]], align 8
+; CHECK-NEXT: [[TMP15:%.*]] = load ptr, ptr [[TMP13]], align 8
+; CHECK-NEXT: [[LV:%.*]] = load double, ptr [[L_P_2]], align 8
+; CHECK-NEXT: [[TMP17:%.*]] = load double, ptr [[TMP15]], align 8
+; CHECK-NEXT: [[TMP18:%.*]] = insertelement <2 x double> poison, double [[LV]], i32 0
+; CHECK-NEXT: [[TMP19:%.*]] = insertelement <2 x double> [[TMP18]], double [[TMP17]], i32 1
+; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x double> [[TMP9]], [[TMP19]]
+; CHECK-NEXT: [[TMP21]] = call double @llvm.vector.reduce.fadd.v2f64(double [[VEC_PHI]], <2 x double> [[TMP20]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2
+; CHECK-NEXT: br i1 true, label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP21:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: br label %[[EXIT:.*]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: ret double [[TMP21]]
+;
+entry:
+ br label %loop
+
+loop:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
+ %red = phi double [ 3.000000e+00, %entry ], [ %red.next, %loop ]
+ %gep.0 = getelementptr %t, ptr %src.0, i64 %iv
+ %l.0 = load double, ptr %gep.0, align 8
+ %gep.8 = getelementptr i8, ptr %gep.0, i64 8
+ %l.1 = load double, ptr %gep.8, align 8
+ %gep.16 = getelementptr i8, ptr %gep.0, i64 16
+ %l.2 = load double, ptr %gep.16, align 8
+ %mul.0 = fmul double %l.0, 3.000000e+00
+ %mul.1 = fmul double %l.1, 3.000000e+00
+ %mul.2 = fmul double %l.2, 3.000000e+00
+ %add.0 = fadd double %mul.0, %mul.1
+ %add.1 = fadd double %add.0, %mul.2
+ %gep.src = getelementptr double, ptr %src.1, i64 %iv
+ %l = load double, ptr %gep.src, align 8
+ %mul256.us = fmul double %add.1, %l
+ %gep.src.2 = getelementptr %t.2, ptr %src.2, i64 %iv
+ %gep.72 = getelementptr i8, ptr %gep.src.2, i64 72
+ %l.p.2 = load ptr, ptr %gep.72, align 8
+ %lv = load double, ptr %l.p.2, align 8
+ %red.next = tail call double @llvm.fmuladd.f64(double %mul256.us, double %lv, double %red)
+ %iv.next = add i64 %iv, 1
+ %ec = icmp eq i64 %iv, 1
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret double %red.next
+}
+
+; Pointer induction variable where the loaded pointer is itself used as the
+; address of a second, dependent load. The autogenerated CHECK lines below
+; match a plain scalar loop (no vector body is emitted), i.e. the vectorizer
+; leaves this loop alone under the apple-m1 cost model.
+; NOTE(review): %start is unused in the body; the pointer IV starts at null.
+define i32 @test_ptr_iv_load_used_by_other_load(ptr %start, ptr %end) {
+; CHECK-LABEL: define i32 @test_ptr_iv_load_used_by_other_load(
+; CHECK-SAME: ptr [[START:%.*]], ptr [[END:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*]]:
+; CHECK-NEXT: br label %[[LOOP:.*]]
+; CHECK: [[LOOP]]:
+; CHECK-NEXT: [[IV:%.*]] = phi ptr [ [[IV_NEXT:%.*]], %[[LOOP]] ], [ null, %[[ENTRY]] ]
+; CHECK-NEXT: [[RED:%.*]] = phi i32 [ [[RED_NEXT:%.*]], %[[LOOP]] ], [ 0, %[[ENTRY]] ]
+; CHECK-NEXT: [[TMP0:%.*]] = load ptr, ptr [[IV]], align 8
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[TMP0]], align 8
+; CHECK-NEXT: [[C:%.*]] = icmp ne i8 [[TMP1]], 0
+; CHECK-NEXT: [[C_EXT:%.*]] = zext i1 [[C]] to i32
+; CHECK-NEXT: [[RED_NEXT]] = or i32 [[RED]], [[C_EXT]]
+; CHECK-NEXT: [[IV_NEXT]] = getelementptr nusw i8, ptr [[IV]], i64 32
+; CHECK-NEXT: [[EC:%.*]] = icmp eq ptr [[IV]], [[END]]
+; CHECK-NEXT: br i1 [[EC]], label %[[EXIT:.*]], label %[[LOOP]]
+; CHECK: [[EXIT]]:
+; CHECK-NEXT: [[RED_LCSSA:%.*]] = phi i32 [ [[RED]], %[[LOOP]] ]
+; CHECK-NEXT: ret i32 [[RED_LCSSA]]
+;
+entry:
+  br label %loop
+
+; Or-reduction over "is the first byte of *(*iv) non-zero"; the pointer IV
+; advances by 32 bytes per iteration and exits when it reaches %end.
+loop: ; preds = %loop, %entry
+  %iv = phi ptr [ %iv.next, %loop ], [ null, %entry ]
+  %red = phi i32 [ %red.next, %loop ], [ 0, %entry ]
+  %0 = load ptr, ptr %iv, align 8
+  %1 = load i8, ptr %0, align 8
+  %c = icmp ne i8 %1, 0
+  %c.ext = zext i1 %c to i32
+  %red.next = or i32 %red, %c.ext
+  %iv.next = getelementptr nusw i8, ptr %iv, i64 32
+  %ec = icmp eq ptr %iv, %end
+  br i1 %ec, label %exit, label %loop
+
+exit:
+  ret i32 %red
+}
+
+
+; Or-reduction over i8 loads with a stride-2 induction variable: the gep
+; indexes [32 x i8] rows, so with an IV step of 2 consecutive iterations
+; load 64 bytes apart. The CHECK lines show the loop IS vectorized with
+; VF 16: the strided loads are replicated as 16 scalar loads plus
+; insertelement, feeding a widened sext/mul and a vector or-reduction.
+; The scalar trip count is 50 (exit when iv.next == 100, step 2); the
+; vector loop covers 48 iterations (INDEX_NEXT compared against 48) and
+; the remaining 2 run in the scalar epilogue (branch to scalar.ph).
+define i32 @test_or_reduction_with_stride_2(i32 %scale, ptr %src) {
+; CHECK-LABEL: define i32 @test_or_reduction_with_stride_2(
+; CHECK-SAME: i32 [[SCALE:%.*]], ptr [[SRC:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: [[ENTRY:.*:]]
+; CHECK-NEXT: br label %[[VECTOR_PH:.*]]
+; CHECK: [[VECTOR_PH]]:
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[SCALE]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <16 x i32> [[BROADCAST_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
+; CHECK-NEXT: br label %[[VECTOR_BODY:.*]]
+; CHECK: [[VECTOR_BODY]]:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[VEC_PHI:%.*]] = phi <16 x i32> [ zeroinitializer, %[[VECTOR_PH]] ], [ [[TMP66:%.*]], %[[VECTOR_BODY]] ]
+; CHECK-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[OFFSET_IDX]], 0
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[OFFSET_IDX]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = add i64 [[OFFSET_IDX]], 4
+; CHECK-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 6
+; CHECK-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 8
+; CHECK-NEXT: [[TMP5:%.*]] = add i64 [[OFFSET_IDX]], 10
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[OFFSET_IDX]], 12
+; CHECK-NEXT: [[TMP7:%.*]] = add i64 [[OFFSET_IDX]], 14
+; CHECK-NEXT: [[TMP8:%.*]] = add i64 [[OFFSET_IDX]], 16
+; CHECK-NEXT: [[TMP9:%.*]] = add i64 [[OFFSET_IDX]], 18
+; CHECK-NEXT: [[TMP10:%.*]] = add i64 [[OFFSET_IDX]], 20
+; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[OFFSET_IDX]], 22
+; CHECK-NEXT: [[TMP12:%.*]] = add i64 [[OFFSET_IDX]], 24
+; CHECK-NEXT: [[TMP13:%.*]] = add i64 [[OFFSET_IDX]], 26
+; CHECK-NEXT: [[TMP14:%.*]] = add i64 [[OFFSET_IDX]], 28
+; CHECK-NEXT: [[TMP15:%.*]] = add i64 [[OFFSET_IDX]], 30
+; CHECK-NEXT: [[TMP16:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP2]]
+; CHECK-NEXT: [[TMP19:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP3]]
+; CHECK-NEXT: [[TMP20:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP4]]
+; CHECK-NEXT: [[TMP21:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP5]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP7]]
+; CHECK-NEXT: [[TMP24:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP8]]
+; CHECK-NEXT: [[TMP25:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP9]]
+; CHECK-NEXT: [[TMP26:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP10]]
+; CHECK-NEXT: [[TMP27:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP11]]
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP12]]
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP13]]
+; CHECK-NEXT: [[TMP30:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP14]]
+; CHECK-NEXT: [[TMP31:%.*]] = getelementptr [32 x i8], ptr [[SRC]], i64 [[TMP15]]
+; CHECK-NEXT: [[TMP32:%.*]] = load i8, ptr [[TMP16]], align 1
+; CHECK-NEXT: [[TMP33:%.*]] = load i8, ptr [[TMP17]], align 1
+; CHECK-NEXT: [[TMP34:%.*]] = load i8, ptr [[TMP18]], align 1
+; CHECK-NEXT: [[TMP35:%.*]] = load i8, ptr [[TMP19]], align 1
+; CHECK-NEXT: [[TMP36:%.*]] = load i8, ptr [[TMP20]], align 1
+; CHECK-NEXT: [[TMP37:%.*]] = load i8, ptr [[TMP21]], align 1
+; CHECK-NEXT: [[TMP38:%.*]] = load i8, ptr [[TMP22]], align 1
+; CHECK-NEXT: [[TMP39:%.*]] = load i8, ptr [[TMP23]], align 1
+; CHECK-NEXT: [[TMP40:%.*]] = load i8, ptr [[TMP24]], align 1
+; CHECK-NEXT: [[TMP41:%.*]] = load i8, ptr [[TMP25]], align 1
+; CHECK-NEXT: [[TMP42:%.*]] = load i8, ptr [[TMP26]], align 1
+; CHECK-NEXT: [[TMP43:%.*]] = load i8, ptr [[TMP27]], align 1
+; CHECK-NEXT: [[TMP44:%.*]] = load i8, ptr [[TMP28]], align 1
+; CHECK-NEXT: [[TMP45:%.*]] = load i8, ptr [[TMP29]], align 1
+; CHECK-NEXT: [[TMP46:%.*]] = load i8, ptr [[TMP30]], align 1
+; CHECK-NEXT: [[TMP47:%.*]] = load i8, ptr [[TMP31]], align 1
+; CHECK-NEXT: [[TMP48:%.*]] = insertelement <16 x i8> poison, i8 [[TMP32]], i32 0
+; CHECK-NEXT: [[TMP49:%.*]] = insertelement <16 x i8> [[TMP48]], i8 [[TMP33]], i32 1
+; CHECK-NEXT: [[TMP50:%.*]] = insertelement <16 x i8> [[TMP49]], i8 [[TMP34]], i32 2
+; CHECK-NEXT: [[TMP51:%.*]] = insertelement <16 x i8> [[TMP50]], i8 [[TMP35]], i32 3
+; CHECK-NEXT: [[TMP52:%.*]] = insertelement <16 x i8> [[TMP51]], i8 [[TMP36]], i32 4
+; CHECK-NEXT: [[TMP53:%.*]] = insertelement <16 x i8> [[TMP52]], i8 [[TMP37]], i32 5
+; CHECK-NEXT: [[TMP54:%.*]] = insertelement <16 x i8> [[TMP53]], i8 [[TMP38]], i32 6
+; CHECK-NEXT: [[TMP55:%.*]] = insertelement <16 x i8> [[TMP54]], i8 [[TMP39]], i32 7
+; CHECK-NEXT: [[TMP56:%.*]] = insertelement <16 x i8> [[TMP55]], i8 [[TMP40]], i32 8
+; CHECK-NEXT: [[TMP57:%.*]] = insertelement <16 x i8> [[TMP56]], i8 [[TMP41]], i32 9
+; CHECK-NEXT: [[TMP58:%.*]] = insertelement <16 x i8> [[TMP57]], i8 [[TMP42]], i32 10
+; CHECK-NEXT: [[TMP59:%.*]] = insertelement <16 x i8> [[TMP58]], i8 [[TMP43]], i32 11
+; CHECK-NEXT: [[TMP60:%.*]] = insertelement <16 x i8> [[TMP59]], i8 [[TMP44]], i32 12
+; CHECK-NEXT: [[TMP61:%.*]] = insertelement <16 x i8> [[TMP60]], i8 [[TMP45]], i32 13
+; CHECK-NEXT: [[TMP62:%.*]] = insertelement <16 x i8> [[TMP61]], i8 [[TMP46]], i32 14
+; CHECK-NEXT: [[TMP63:%.*]] = insertelement <16 x i8> [[TMP62]], i8 [[TMP47]], i32 15
+; CHECK-NEXT: [[TMP64:%.*]] = sext <16 x i8> [[TMP63]] to <16 x i32>
+; CHECK-NEXT: [[TMP65:%.*]] = mul <16 x i32> [[BROADCAST_SPLAT]], [[TMP64]]
+; CHECK-NEXT: [[TMP66]] = or <16 x i32> [[TMP65]], [[VEC_PHI]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT: [[TMP67:%.*]] = icmp eq i64 [[INDEX_NEXT]], 48
+; CHECK-NEXT: br i1 [[TMP67]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP22:![0-9]+]]
+; CHECK: [[MIDDLE_BLOCK]]:
+; CHECK-NEXT: [[TMP68:%.*]] = call i32 @llvm.vector.reduce.or.v16i32(<16 x i32> [[TMP66]])
+; CHECK-NEXT: br label %[[SCALAR_PH:.*]]
+; CHECK: [[SCALAR_PH]]:
+;
+entry:
+  br label %loop
+
+loop:
+  %iv = phi i64 [ %iv.next, %loop ], [ 0, %entry ]
+  %reduction = phi i32 [ %reduction.next, %loop ], [ 0, %entry ]
+  %gep = getelementptr [32 x i8], ptr %src, i64 %iv
+  %load = load i8, ptr %gep, align 1
+  %sext = sext i8 %load to i32
+  %mul = mul i32 %scale, %sext
+  %reduction.next = or i32 %mul, %reduction
+  %iv.next = add i64 %iv, 2
+  %cmp = icmp eq i64 %iv.next, 100
+  br i1 %cmp, label %exit, label %loop
+
+exit:
+  ret i32 %reduction.next
+}
+
+; TBAA (type-based alias analysis) metadata nodes. No instruction in this
+; chunk attaches them via !tbaa; NOTE(review): presumably used elsewhere in
+; the full test file (or left over from test reduction) -- confirm before
+; removing.
+!0 = !{!1, !2, i64 0}
+!1 = !{!"", !2, i64 0}
+!2 = !{!"long long", !3, i64 0}
+!3 = !{!"omnipotent char", !4, i64 0}
+!4 = !{!"Simple C/C++ TBAA"}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"int", !3, i64 0}
More information about the llvm-commits mailing list