[llvm] 202cd7b - [LV] Extend metadata.ll tests to also check interleaving.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Mon Apr 14 12:53:19 PDT 2025
Author: Florian Hahn
Date: 2025-04-14T21:51:15+02:00
New Revision: 202cd7bb33b8116bbfc6e25eef8ad8446c57bed5
URL: https://github.com/llvm/llvm-project/commit/202cd7bb33b8116bbfc6e25eef8ad8446c57bed5
DIFF: https://github.com/llvm/llvm-project/commit/202cd7bb33b8116bbfc6e25eef8ad8446c57bed5.diff
LOG: [LV] Extend metadata.ll tests to also check interleaving.
Adds missing test coverage for preserving existing metadata when
interleaving.
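
For context, the new INTERLEAVE check lines below verify that instruction
metadata (!tbaa, !fpmath, !range and unknown custom metadata) is propagated
to every interleaved copy of a widened instruction, not only the first one.
A minimal sketch of the kind of input being exercised (the function name
@fpmath_example and its constants are illustrative and not part of this
commit; the opt invocation mirrors the new RUN line):

; Run with: opt -S -passes=loop-vectorize -force-vector-width=2 \
;           -force-vector-interleave=2
define void @fpmath_example(ptr noalias %a, ptr noalias %b, i64 %n) {
entry:
  br label %loop

loop:
  %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ]
  %gep.a = getelementptr inbounds double, ptr %a, i64 %iv
  %l = load double, ptr %gep.a, align 8
  ; !fpmath on the scalar fadd; after interleaving, both <2 x double>
  ; fadd copies in the vector body are expected to carry it as well.
  %add = fadd double %l, 1.000000e+00, !fpmath !0
  %gep.b = getelementptr inbounds double, ptr %b, i64 %iv
  store double %add, ptr %gep.b, align 8
  %iv.next = add nuw nsw i64 %iv, 1
  %ec = icmp eq i64 %iv.next, %n
  br i1 %ec, label %exit, label %loop

exit:
  ret void
}

!0 = !{float 2.500000e+00}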
Added:
Modified:
llvm/test/Transforms/LoopVectorize/metadata.ll
Removed:
################################################################################
diff --git a/llvm/test/Transforms/LoopVectorize/metadata.ll b/llvm/test/Transforms/LoopVectorize/metadata.ll
index bb1a1cb1c3529..287a15e3102d1 100644
--- a/llvm/test/Transforms/LoopVectorize/metadata.ll
+++ b/llvm/test/Transforms/LoopVectorize/metadata.ll
@@ -1,5 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -S < %s -passes=loop-vectorize -force-vector-interleave=1 -force-vector-width=2 | FileCheck %s
+; RUN: opt -S < %s -passes=loop-vectorize -force-vector-interleave=2 -force-vector-width=2 | FileCheck --check-prefix=INTERLEAVE %s
; Make sure we are preserving existing metadata in the vectorized code.
@@ -50,6 +51,60 @@ define void @fp_math(ptr nocapture %a, ptr noalias %b, i64 %size) {
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
+; INTERLEAVE-LABEL: define void @fp_math(
+; INTERLEAVE-SAME: ptr captures(none) [[A:%.*]], ptr noalias [[B:%.*]], i64 [[SIZE:%.*]]) {
+; INTERLEAVE-NEXT: [[ENTRY:.*]]:
+; INTERLEAVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SIZE]], 4
+; INTERLEAVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; INTERLEAVE: [[VECTOR_PH]]:
+; INTERLEAVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SIZE]], 4
+; INTERLEAVE-NEXT: [[N_VEC:%.*]] = sub i64 [[SIZE]], [[N_MOD_VF]]
+; INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; INTERLEAVE: [[VECTOR_BODY]]:
+; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr inbounds double, ptr [[TMP0]], i32 0
+; INTERLEAVE-NEXT: [[TMP3:%.*]] = getelementptr inbounds double, ptr [[TMP0]], i32 2
+; INTERLEAVE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP2]], align 4, !tbaa [[TBAA0:![0-9]+]]
+; INTERLEAVE-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x double>, ptr [[TMP3]], align 4, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[TMP4:%.*]] = fadd <2 x double> [[WIDE_LOAD]], splat (double 9.900000e+01), !fpmath [[META3:![0-9]+]]
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = fadd <2 x double> [[WIDE_LOAD1]], splat (double 9.900000e+01), !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = fcmp oge <2 x double> [[TMP4]], splat (double 1.000000e+01)
+; INTERLEAVE-NEXT: [[TMP7:%.*]] = fcmp oge <2 x double> [[TMP5]], splat (double 1.000000e+01)
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = select <2 x i1> [[TMP6]], <2 x double> [[WIDE_LOAD]], <2 x double> zeroinitializer, !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[TMP9:%.*]] = select <2 x i1> [[TMP7]], <2 x double> [[WIDE_LOAD1]], <2 x double> zeroinitializer, !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[TMP10:%.*]] = fptrunc <2 x double> [[TMP8]] to <2 x float>, !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[TMP11:%.*]] = fptrunc <2 x double> [[TMP9]] to <2 x float>, !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[TMP12:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 0
+; INTERLEAVE-NEXT: [[TMP13:%.*]] = getelementptr inbounds float, ptr [[TMP1]], i32 2
+; INTERLEAVE-NEXT: store <2 x float> [[TMP10]], ptr [[TMP12]], align 4, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: store <2 x float> [[TMP11]], ptr [[TMP13]], align 4, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; INTERLEAVE-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; INTERLEAVE-NEXT: br i1 [[TMP14]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; INTERLEAVE: [[MIDDLE_BLOCK]]:
+; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SIZE]], [[N_VEC]]
+; INTERLEAVE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; INTERLEAVE: [[SCALAR_PH]]:
+; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
+; INTERLEAVE: [[LOOP]]:
+; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; INTERLEAVE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
+; INTERLEAVE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds float, ptr [[B]], i64 [[IV]]
+; INTERLEAVE-NEXT: [[L_1:%.*]] = load double, ptr [[ARRAYIDX_1]], align 4, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[ADD:%.*]] = fadd double [[L_1]], 9.900000e+01, !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[C:%.*]] = fcmp oge double [[ADD]], 1.000000e+01
+; INTERLEAVE-NEXT: [[SEL:%.*]] = select i1 [[C]], double [[L_1]], double 0.000000e+00, !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[T:%.*]] = fptrunc double [[SEL]] to float, !fpmath [[META3]]
+; INTERLEAVE-NEXT: store float [[T]], ptr [[ARRAYIDX_2]], align 4, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1
+; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[SIZE]]
+; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP7:![0-9]+]]
+; INTERLEAVE: [[EXIT]]:
+; INTERLEAVE-NEXT: ret void
+;
entry:
br label %loop
@@ -109,6 +164,47 @@ define void @widen_call_range(ptr noalias %a, ptr readonly %b) {
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
+; INTERLEAVE-LABEL: define void @widen_call_range(
+; INTERLEAVE-SAME: ptr noalias [[A:%.*]], ptr readonly [[B:%.*]]) {
+; INTERLEAVE-NEXT: [[ENTRY:.*]]:
+; INTERLEAVE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; INTERLEAVE: [[VECTOR_PH]]:
+; INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; INTERLEAVE: [[VECTOR_BODY]]:
+; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[TMP0]], i32 0
+; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[TMP0]], i32 2
+; INTERLEAVE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 4, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i64>, ptr [[TMP2]], align 4, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[TMP3:%.*]] = call <2 x i64> @foo_vector_fixed2_nomask(<2 x i64> [[WIDE_LOAD]])
+; INTERLEAVE-NEXT: [[TMP4:%.*]] = call <2 x i64> @foo_vector_fixed2_nomask(<2 x i64> [[WIDE_LOAD1]])
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 0
+; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 2
+; INTERLEAVE-NEXT: store <2 x i64> [[TMP3]], ptr [[TMP6]], align 4
+; INTERLEAVE-NEXT: store <2 x i64> [[TMP4]], ptr [[TMP7]], align 4
+; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
+; INTERLEAVE: [[MIDDLE_BLOCK]]:
+; INTERLEAVE-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; INTERLEAVE: [[SCALAR_PH]]:
+; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
+; INTERLEAVE: [[LOOP]]:
+; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
+; INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4, !tbaa [[TBAA0]], !range [[RNG9:![0-9]+]]
+; INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @foo(i64 [[LOAD]]) #[[ATTR1:[0-9]+]], !range [[RNG9]]
+; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
+; INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4
+; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP10:![0-9]+]]
+; INTERLEAVE: [[EXIT]]:
+; INTERLEAVE-NEXT: ret void
+;
entry:
br label %loop
@@ -164,6 +260,47 @@ define void @widen_call_fpmath(ptr noalias %a, ptr readonly %b) {
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
+; INTERLEAVE-LABEL: define void @widen_call_fpmath(
+; INTERLEAVE-SAME: ptr noalias [[A:%.*]], ptr readonly [[B:%.*]]) {
+; INTERLEAVE-NEXT: [[ENTRY:.*]]:
+; INTERLEAVE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; INTERLEAVE: [[VECTOR_PH]]:
+; INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; INTERLEAVE: [[VECTOR_BODY]]:
+; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[TMP0]], i32 0
+; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP0]], i32 2
+; INTERLEAVE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP1]], align 8, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x double>, ptr [[TMP2]], align 8, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[TMP3:%.*]] = call <2 x double> @bar_vector_fixed2_nomask(<2 x double> [[WIDE_LOAD]]), !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[TMP4:%.*]] = call <2 x double> @bar_vector_fixed2_nomask(<2 x double> [[WIDE_LOAD1]]), !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[TMP5]], i32 0
+; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[TMP5]], i32 2
+; INTERLEAVE-NEXT: store <2 x double> [[TMP3]], ptr [[TMP6]], align 8
+; INTERLEAVE-NEXT: store <2 x double> [[TMP4]], ptr [[TMP7]], align 8
+; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; INTERLEAVE: [[MIDDLE_BLOCK]]:
+; INTERLEAVE-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; INTERLEAVE: [[SCALAR_PH]]:
+; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
+; INTERLEAVE: [[LOOP]]:
+; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]]
+; INTERLEAVE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[CALL:%.*]] = call double @bar(double [[LOAD]]) #[[ATTR2:[0-9]+]], !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
+; INTERLEAVE-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8
+; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP12:![0-9]+]]
+; INTERLEAVE: [[EXIT]]:
+; INTERLEAVE-NEXT: ret void
+;
entry:
br label %loop
@@ -219,6 +356,47 @@ define void @widen_intrinsic(ptr noalias %a, ptr readonly %b) {
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
+; INTERLEAVE-LABEL: define void @widen_intrinsic(
+; INTERLEAVE-SAME: ptr noalias [[A:%.*]], ptr readonly [[B:%.*]]) {
+; INTERLEAVE-NEXT: [[ENTRY:.*]]:
+; INTERLEAVE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; INTERLEAVE: [[VECTOR_PH]]:
+; INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; INTERLEAVE: [[VECTOR_BODY]]:
+; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr i64, ptr [[B]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr i64, ptr [[TMP0]], i32 0
+; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr i64, ptr [[TMP0]], i32 2
+; INTERLEAVE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x i64>, ptr [[TMP1]], align 4
+; INTERLEAVE-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x i64>, ptr [[TMP2]], align 4
+; INTERLEAVE-NEXT: [[TMP3:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> [[WIDE_LOAD]], i1 true)
+; INTERLEAVE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.abs.v2i64(<2 x i64> [[WIDE_LOAD1]], i1 true)
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 0
+; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds i64, ptr [[TMP5]], i32 2
+; INTERLEAVE-NEXT: store <2 x i64> [[TMP3]], ptr [[TMP6]], align 4
+; INTERLEAVE-NEXT: store <2 x i64> [[TMP4]], ptr [[TMP7]], align 4
+; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP13:![0-9]+]]
+; INTERLEAVE: [[MIDDLE_BLOCK]]:
+; INTERLEAVE-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; INTERLEAVE: [[SCALAR_PH]]:
+; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
+; INTERLEAVE: [[LOOP]]:
+; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr i64, ptr [[B]], i64 [[IV]]
+; INTERLEAVE-NEXT: [[LOAD:%.*]] = load i64, ptr [[GEP]], align 4
+; INTERLEAVE-NEXT: [[CALL:%.*]] = call i64 @llvm.abs.i64(i64 [[LOAD]], i1 true), !range [[RNG9]]
+; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[IV]]
+; INTERLEAVE-NEXT: store i64 [[CALL]], ptr [[ARRAYIDX]], align 4
+; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP14:![0-9]+]]
+; INTERLEAVE: [[EXIT]]:
+; INTERLEAVE-NEXT: ret void
+;
entry:
br label %loop
@@ -274,6 +452,47 @@ define void @widen_intrinsic_fpmath(ptr noalias %a, ptr readonly %b) {
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
+; INTERLEAVE-LABEL: define void @widen_intrinsic_fpmath(
+; INTERLEAVE-SAME: ptr noalias [[A:%.*]], ptr readonly [[B:%.*]]) {
+; INTERLEAVE-NEXT: [[ENTRY:.*]]:
+; INTERLEAVE-NEXT: br i1 false, label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; INTERLEAVE: [[VECTOR_PH]]:
+; INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; INTERLEAVE: [[VECTOR_BODY]]:
+; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr double, ptr [[B]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr double, ptr [[TMP0]], i32 0
+; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr double, ptr [[TMP0]], i32 2
+; INTERLEAVE-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP1]], align 8, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[WIDE_LOAD1:%.*]] = load <2 x double>, ptr [[TMP2]], align 8, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[TMP3:%.*]] = call <2 x double> @llvm.sin.v2f64(<2 x double> [[WIDE_LOAD]]), !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[TMP4:%.*]] = call <2 x double> @llvm.sin.v2f64(<2 x double> [[WIDE_LOAD1]]), !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[INDEX]]
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds double, ptr [[TMP5]], i32 0
+; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds double, ptr [[TMP5]], i32 2
+; INTERLEAVE-NEXT: store <2 x double> [[TMP3]], ptr [[TMP6]], align 8
+; INTERLEAVE-NEXT: store <2 x double> [[TMP4]], ptr [[TMP7]], align 8
+; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP15:![0-9]+]]
+; INTERLEAVE: [[MIDDLE_BLOCK]]:
+; INTERLEAVE-NEXT: br i1 true, label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; INTERLEAVE: [[SCALAR_PH]]:
+; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ 1024, %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
+; INTERLEAVE: [[LOOP]]:
+; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; INTERLEAVE-NEXT: [[GEP:%.*]] = getelementptr double, ptr [[B]], i64 [[IV]]
+; INTERLEAVE-NEXT: [[LOAD:%.*]] = load double, ptr [[GEP]], align 8, !tbaa [[TBAA0]]
+; INTERLEAVE-NEXT: [[CALL:%.*]] = call double @llvm.sin.f64(double [[LOAD]]) #[[ATTR2]], !fpmath [[META3]]
+; INTERLEAVE-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double, ptr [[A]], i64 [[IV]]
+; INTERLEAVE-NEXT: store double [[CALL]], ptr [[ARRAYIDX]], align 8
+; INTERLEAVE-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], 1024
+; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[EXIT]], label %[[LOOP]], !llvm.loop [[LOOP16:![0-9]+]]
+; INTERLEAVE: [[EXIT]]:
+; INTERLEAVE-NEXT: ret void
+;
entry:
br label %loop
@@ -337,6 +556,57 @@ define void @unknown_metadata(ptr nocapture %a, ptr noalias %b, i64 %size) {
; CHECK: [[EXIT]]:
; CHECK-NEXT: ret void
;
+; INTERLEAVE-LABEL: define void @unknown_metadata(
+; INTERLEAVE-SAME: ptr captures(none) [[A:%.*]], ptr noalias [[B:%.*]], i64 [[SIZE:%.*]]) {
+; INTERLEAVE-NEXT: [[ENTRY:.*]]:
+; INTERLEAVE-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[SIZE]], 4
+; INTERLEAVE-NEXT: br i1 [[MIN_ITERS_CHECK]], label %[[SCALAR_PH:.*]], label %[[VECTOR_PH:.*]]
+; INTERLEAVE: [[VECTOR_PH]]:
+; INTERLEAVE-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[SIZE]], 4
+; INTERLEAVE-NEXT: [[N_VEC:%.*]] = sub i64 [[SIZE]], [[N_MOD_VF]]
+; INTERLEAVE-NEXT: br label %[[VECTOR_BODY:.*]]
+; INTERLEAVE: [[VECTOR_BODY]]:
+; INTERLEAVE-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[VEC_IND:%.*]] = phi <2 x i64> [ <i64 0, i64 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], %[[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[VEC_IND1:%.*]] = phi <2 x i32> [ <i32 0, i32 1>, %[[VECTOR_PH]] ], [ [[VEC_IND_NEXT2:%.*]], %[[VECTOR_BODY]] ]
+; INTERLEAVE-NEXT: [[STEP_ADD:%.*]] = add <2 x i64> [[VEC_IND]], splat (i64 2)
+; INTERLEAVE-NEXT: [[STEP_ADD3:%.*]] = add <2 x i32> [[VEC_IND1]], splat (i32 2)
+; INTERLEAVE-NEXT: [[TMP0:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[INDEX]], !custom_md [[META2:![0-9]+]]
+; INTERLEAVE-NEXT: [[TMP1:%.*]] = getelementptr inbounds i32, ptr [[B]], <2 x i64> [[VEC_IND]]
+; INTERLEAVE-NEXT: [[TMP2:%.*]] = getelementptr inbounds i32, ptr [[B]], <2 x i64> [[STEP_ADD]]
+; INTERLEAVE-NEXT: [[TMP3:%.*]] = extractelement <2 x ptr> [[TMP1]], i32 0
+; INTERLEAVE-NEXT: [[TMP4:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 0
+; INTERLEAVE-NEXT: [[TMP5:%.*]] = getelementptr inbounds i32, ptr [[TMP3]], i32 2
+; INTERLEAVE-NEXT: store <2 x i32> [[VEC_IND1]], ptr [[TMP4]], align 4
+; INTERLEAVE-NEXT: store <2 x i32> [[STEP_ADD3]], ptr [[TMP5]], align 4
+; INTERLEAVE-NEXT: [[TMP6:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i32 0
+; INTERLEAVE-NEXT: [[TMP7:%.*]] = getelementptr inbounds ptr, ptr [[TMP0]], i32 2
+; INTERLEAVE-NEXT: store <2 x ptr> [[TMP1]], ptr [[TMP6]], align 8
+; INTERLEAVE-NEXT: store <2 x ptr> [[TMP2]], ptr [[TMP7]], align 8
+; INTERLEAVE-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; INTERLEAVE-NEXT: [[VEC_IND_NEXT]] = add <2 x i64> [[STEP_ADD]], splat (i64 2)
+; INTERLEAVE-NEXT: [[VEC_IND_NEXT2]] = add <2 x i32> [[STEP_ADD3]], splat (i32 2)
+; INTERLEAVE-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; INTERLEAVE-NEXT: br i1 [[TMP8]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP17:![0-9]+]]
+; INTERLEAVE: [[MIDDLE_BLOCK]]:
+; INTERLEAVE-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[SIZE]], [[N_VEC]]
+; INTERLEAVE-NEXT: br i1 [[CMP_N]], label %[[EXIT:.*]], label %[[SCALAR_PH]]
+; INTERLEAVE: [[SCALAR_PH]]:
+; INTERLEAVE-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], %[[MIDDLE_BLOCK]] ], [ 0, %[[ENTRY]] ]
+; INTERLEAVE-NEXT: br label %[[LOOP:.*]]
+; INTERLEAVE: [[LOOP]]:
+; INTERLEAVE-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], %[[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], %[[LOOP]] ]
+; INTERLEAVE-NEXT: [[ARRAYIDX_1:%.*]] = getelementptr inbounds ptr, ptr [[A]], i64 [[IV]], !custom_md [[META2]]
+; INTERLEAVE-NEXT: [[ARRAYIDX_2:%.*]] = getelementptr inbounds i32, ptr [[B]], i64 [[IV]], !custom_md [[META2]]
+; INTERLEAVE-NEXT: [[T:%.*]] = trunc i64 [[IV]] to i32, !custom_md [[META3]]
+; INTERLEAVE-NEXT: store i32 [[T]], ptr [[ARRAYIDX_2]], align 4, !custom_md [[META2]]
+; INTERLEAVE-NEXT: store ptr [[ARRAYIDX_2]], ptr [[ARRAYIDX_1]], align 8, !custom_md [[META2]]
+; INTERLEAVE-NEXT: [[IV_NEXT]] = add i64 [[IV]], 1, !custom_md [[META2]]
+; INTERLEAVE-NEXT: [[EXITCOND:%.*]] = icmp ne i64 [[IV_NEXT]], [[SIZE]], !custom_md [[META2]]
+; INTERLEAVE-NEXT: br i1 [[EXITCOND]], label %[[LOOP]], label %[[EXIT]], !llvm.loop [[LOOP18:![0-9]+]], !custom_md [[META2]]
+; INTERLEAVE: [[EXIT]]:
+; INTERLEAVE-NEXT: ret void
+;
entry:
br label %loop
@@ -390,3 +660,23 @@ attributes #1 = { nounwind "vector-function-abi-variant"="_ZGV_LLVM_N2v_bar(bar_
; CHECK: [[LOOP17]] = distinct !{[[LOOP17]], [[META5]], [[META6]]}
; CHECK: [[LOOP18]] = distinct !{[[LOOP18]], [[META6]], [[META5]]}
;.
+; INTERLEAVE: [[TBAA0]] = !{[[META1:![0-9]+]], [[META1]], i64 0, i64 0}
+; INTERLEAVE: [[META1]] = !{!"omnipotent char", [[META2]]}
+; INTERLEAVE: [[META2]] = !{!"Simple C/C++ TBAA"}
+; INTERLEAVE: [[META3]] = !{float 2.500000e+00}
+; INTERLEAVE: [[LOOP4]] = distinct !{[[LOOP4]], [[META5:![0-9]+]], [[META6:![0-9]+]]}
+; INTERLEAVE: [[META5]] = !{!"llvm.loop.isvectorized", i32 1}
+; INTERLEAVE: [[META6]] = !{!"llvm.loop.unroll.runtime.disable"}
+; INTERLEAVE: [[LOOP7]] = distinct !{[[LOOP7]], [[META6]], [[META5]]}
+; INTERLEAVE: [[LOOP8]] = distinct !{[[LOOP8]], [[META5]], [[META6]]}
+; INTERLEAVE: [[RNG9]] = !{i64 0, i64 2}
+; INTERLEAVE: [[LOOP10]] = distinct !{[[LOOP10]], [[META6]], [[META5]]}
+; INTERLEAVE: [[LOOP11]] = distinct !{[[LOOP11]], [[META5]], [[META6]]}
+; INTERLEAVE: [[LOOP12]] = distinct !{[[LOOP12]], [[META6]], [[META5]]}
+; INTERLEAVE: [[LOOP13]] = distinct !{[[LOOP13]], [[META5]], [[META6]]}
+; INTERLEAVE: [[LOOP14]] = distinct !{[[LOOP14]], [[META6]], [[META5]]}
+; INTERLEAVE: [[LOOP15]] = distinct !{[[LOOP15]], [[META5]], [[META6]]}
+; INTERLEAVE: [[LOOP16]] = distinct !{[[LOOP16]], [[META6]], [[META5]]}
+; INTERLEAVE: [[LOOP17]] = distinct !{[[LOOP17]], [[META5]], [[META6]]}
+; INTERLEAVE: [[LOOP18]] = distinct !{[[LOOP18]], [[META6]], [[META5]]}
+;.