[llvm] 88de27e - [LV] Handle non-integral types when considering interleave widening legality

Philip Reames via llvm-commits <llvm-commits at lists.llvm.org>
Sun Apr 3 20:16:53 PDT 2022


Author: Philip Reames
Date: 2022-04-03T20:16:20-07:00
New Revision: 88de27e3fd9fccec9abd1d224282a6374931fb64

URL: https://github.com/llvm/llvm-project/commit/88de27e3fd9fccec9abd1d224282a6374931fb64
DIFF: https://github.com/llvm/llvm-project/commit/88de27e3fd9fccec9abd1d224282a6374931fb64.diff

LOG: [LV] Handle non-integral types when considering interleave widening legality

In general, anywhere we might need to insert a blind bitcast, we need to make sure the types involved are losslessly convertible. Non-integral pointers cannot be coerced to or from integers, nor to pointers in a different non-integral address space, so an interleave group whose members mix such types cannot be widened.

This fixes PR54634.
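
As a rough illustration, here is a minimal, hypothetical loop (reduced from the new test; the function and value names and the trimmed datalayout are illustrative, not part of the commit) whose factor-2 interleaved store group mixes a non-integral addrspace(10) pointer member with an i64 member, so the members cannot be losslessly cast to a common vector element type and the group must not be widened:

; Sketch only: "ni:10" marks addrspace(10) pointers as non-integral.
target datalayout = "e-m:e-i64:64-n8:16:32:64-S128-ni:10"

define void @sketch({ i8 addrspace(10)*, i64 } addrspace(13)* %base, i8 addrspace(10)* %p, i64 %v, i64 %n) {
entry:
  br label %loop

loop:                                             ; preds = %loop, %entry
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  ; Two consecutive fields written each iteration form an interleave group.
  %f0 = getelementptr inbounds { i8 addrspace(10)*, i64 }, { i8 addrspace(10)*, i64 } addrspace(13)* %base, i64 %i, i32 0
  store i8 addrspace(10)* %p, i8 addrspace(10)* addrspace(13)* %f0, align 8
  %f1 = getelementptr inbounds { i8 addrspace(10)*, i64 }, { i8 addrspace(10)*, i64 } addrspace(13)* %base, i64 %i, i32 1
  store i64 %v, i64 addrspace(13)* %f1, align 8
  %i.next = add nuw nsw i64 %i, 1
  %done = icmp eq i64 %i.next, %n
  br i1 %done, label %exit, label %loop

exit:
  ret void
}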

Added: 
    llvm/test/Transforms/LoopVectorize/X86/pr54634.ll

Modified: 
    llvm/lib/Transforms/Vectorize/LoopVectorize.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 241e86b29ad77..ef837c5c18242 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -4511,6 +4511,27 @@ bool LoopVectorizationCostModel::interleavedAccessCanBeWidened(
   if (hasIrregularType(ScalarTy, DL))
     return false;
 
+  // If the group involves a non-integral pointer, we may not be able to
+  // losslessly cast all values to a common type.
+  unsigned InterleaveFactor = Group->getFactor();
+  bool ScalarNI = DL.isNonIntegralPointerType(ScalarTy);
+  for (unsigned i = 0; i < InterleaveFactor; i++) {
+    Instruction *Member = Group->getMember(i);
+    if (!Member)
+      continue;
+    auto *MemberTy = getLoadStoreType(Member);
+    bool MemberNI = DL.isNonIntegralPointerType(MemberTy);
+    // Don't coerce non-integral pointers to integers or vice versa.
+    if (MemberNI != ScalarNI) {
+      // TODO: Consider adding special nullptr value case here
+      return false;
+    } else if (MemberNI && ScalarNI &&
+               ScalarTy->getPointerAddressSpace() !=
+               MemberTy->getPointerAddressSpace()) {
+      return false;
+    }
+  }
+
   // Check if masking is required.
   // A Group may need masking for one of two reasons: it resides in a block that
   // needs predication, or it was decided to use masking to deal with gaps

diff --git a/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll b/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll
new file mode 100644
index 0000000000000..5419efd454c73
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/X86/pr54634.ll
@@ -0,0 +1,155 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -loop-vectorize < %s -mcpu=skylake | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128-ni:10:11:12:13"
+target triple = "x86_64-unknown-linux-gnu"
+
+@jlplt_ijl_alloc_array_1d_10294_got = external dso_local local_unnamed_addr global void ()*
+
+define {} addrspace(10)* @japi1_vect_42283({} addrspace(10)** nocapture readonly %0, i32 %1) local_unnamed_addr #0 {
+; CHECK-LABEL: @japi1_vect_42283(
+; CHECK-NEXT:  top:
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i32 [[TMP1:%.*]] to i64
+; CHECK-NEXT:    [[TMP3:%.*]] = load atomic {} addrspace(10)* ({} addrspace(10)*, i64)*, {} addrspace(10)* ({} addrspace(10)*, i64)** bitcast (void ()** @jlplt_ijl_alloc_array_1d_10294_got to {} addrspace(10)* ({} addrspace(10)*, i64)**) unordered, align 8
+; CHECK-NEXT:    [[TMP4:%.*]] = tail call {} addrspace(10)* [[TMP3]]({} addrspace(10)* null, i64 0)
+; CHECK-NEXT:    [[TMP5:%.*]] = bitcast {} addrspace(10)** [[TMP0:%.*]] to { {} addrspace(10)*, i64 } addrspace(10)**
+; CHECK-NEXT:    [[TMP6:%.*]] = load { {} addrspace(10)*, i64 } addrspace(10)*, { {} addrspace(10)*, i64 } addrspace(10)** [[TMP5]], align 8, !tbaa [[TBAA0:![0-9]+]]
+; CHECK-NEXT:    [[TMP7:%.*]] = bitcast {} addrspace(10)* [[TMP4]] to { {} addrspace(10)*, i64 } addrspace(13)* addrspace(10)*
+; CHECK-NEXT:    [[TMP8:%.*]] = addrspacecast { {} addrspace(10)*, i64 } addrspace(13)* addrspace(10)* [[TMP7]] to { {} addrspace(10)*, i64 } addrspace(13)* addrspace(11)*
+; CHECK-NEXT:    [[TMP9:%.*]] = load { {} addrspace(10)*, i64 } addrspace(13)*, { {} addrspace(10)*, i64 } addrspace(13)* addrspace(11)* [[TMP8]], align 8, !tbaa [[TBAA5:![0-9]+]]
+; CHECK-NEXT:    [[TMP10:%.*]] = bitcast { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]] to i8 addrspace(13)*
+; CHECK-NEXT:    [[DOTELT:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(10)* [[TMP6]], i64 0, i32 0
+; CHECK-NEXT:    [[DOTUNPACK:%.*]] = load {} addrspace(10)*, {} addrspace(10)* addrspace(10)* [[DOTELT]], align 8, !tbaa [[TBAA8:![0-9]+]]
+; CHECK-NEXT:    [[DOTELT1:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(10)* [[TMP6]], i64 0, i32 1
+; CHECK-NEXT:    [[DOTUNPACK2:%.*]] = load i64, i64 addrspace(10)* [[DOTELT1]], align 8, !tbaa [[TBAA8]]
+; CHECK-NEXT:    [[TMP11:%.*]] = add nsw i64 [[TMP2]], 1
+; CHECK-NEXT:    [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP11]], 16
+; CHECK-NEXT:    br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_SCEVCHECK:%.*]]
+; CHECK:       vector.scevcheck:
+; CHECK-NEXT:    [[MUL:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[TMP2]])
+; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i64, i1 } [[MUL]], 0
+; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i64, i1 } [[MUL]], 1
+; CHECK-NEXT:    [[TMP12:%.*]] = sub i64 0, [[MUL_RESULT]]
+; CHECK-NEXT:    [[TMP13:%.*]] = getelementptr i8, i8 addrspace(13)* [[TMP10]], i64 [[MUL_RESULT]]
+; CHECK-NEXT:    [[TMP14:%.*]] = icmp ult i8 addrspace(13)* [[TMP13]], [[TMP10]]
+; CHECK-NEXT:    [[TMP15:%.*]] = or i1 [[TMP14]], [[MUL_OVERFLOW]]
+; CHECK-NEXT:    [[SCEVGEP:%.*]] = getelementptr { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], i64 0, i32 1
+; CHECK-NEXT:    [[SCEVGEP1:%.*]] = bitcast i64 addrspace(13)* [[SCEVGEP]] to { {} addrspace(10)*, i64 } addrspace(13)*
+; CHECK-NEXT:    [[MUL2:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 16, i64 [[TMP2]])
+; CHECK-NEXT:    [[MUL_RESULT3:%.*]] = extractvalue { i64, i1 } [[MUL2]], 0
+; CHECK-NEXT:    [[MUL_OVERFLOW4:%.*]] = extractvalue { i64, i1 } [[MUL2]], 1
+; CHECK-NEXT:    [[SCEVGEP15:%.*]] = bitcast { {} addrspace(10)*, i64 } addrspace(13)* [[SCEVGEP1]] to i8 addrspace(13)*
+; CHECK-NEXT:    [[TMP16:%.*]] = sub i64 0, [[MUL_RESULT3]]
+; CHECK-NEXT:    [[TMP17:%.*]] = getelementptr i8, i8 addrspace(13)* [[SCEVGEP15]], i64 [[MUL_RESULT3]]
+; CHECK-NEXT:    [[TMP18:%.*]] = icmp ult i8 addrspace(13)* [[TMP17]], [[SCEVGEP15]]
+; CHECK-NEXT:    [[TMP19:%.*]] = or i1 [[TMP18]], [[MUL_OVERFLOW4]]
+; CHECK-NEXT:    [[TMP20:%.*]] = or i1 [[TMP15]], [[TMP19]]
+; CHECK-NEXT:    br i1 [[TMP20]], label [[SCALAR_PH]], label [[VECTOR_PH:%.*]]
+; CHECK:       vector.ph:
+; CHECK-NEXT:    [[N_MOD_VF:%.*]] = urem i64 [[TMP11]], 16
+; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[TMP11]], [[N_MOD_VF]]
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x {} addrspace(10)*> poison, {} addrspace(10)* [[DOTUNPACK]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x {} addrspace(10)*> [[BROADCAST_SPLATINSERT]], <4 x {} addrspace(10)*> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT9:%.*]] = insertelement <4 x {} addrspace(10)*> poison, {} addrspace(10)* [[DOTUNPACK]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT10:%.*]] = shufflevector <4 x {} addrspace(10)*> [[BROADCAST_SPLATINSERT9]], <4 x {} addrspace(10)*> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT11:%.*]] = insertelement <4 x {} addrspace(10)*> poison, {} addrspace(10)* [[DOTUNPACK]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT12:%.*]] = shufflevector <4 x {} addrspace(10)*> [[BROADCAST_SPLATINSERT11]], <4 x {} addrspace(10)*> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT13:%.*]] = insertelement <4 x {} addrspace(10)*> poison, {} addrspace(10)* [[DOTUNPACK]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT14:%.*]] = shufflevector <4 x {} addrspace(10)*> [[BROADCAST_SPLATINSERT13]], <4 x {} addrspace(10)*> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT15:%.*]] = insertelement <4 x i64> poison, i64 [[DOTUNPACK2]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT16:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT15]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT17:%.*]] = insertelement <4 x i64> poison, i64 [[DOTUNPACK2]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT18:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT17]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT19:%.*]] = insertelement <4 x i64> poison, i64 [[DOTUNPACK2]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT20:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT19]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    [[BROADCAST_SPLATINSERT21:%.*]] = insertelement <4 x i64> poison, i64 [[DOTUNPACK2]], i32 0
+; CHECK-NEXT:    [[BROADCAST_SPLAT22:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT21]], <4 x i64> poison, <4 x i32> zeroinitializer
+; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
+; CHECK:       vector.body:
+; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[VEC_IND:%.*]] = phi <4 x i64> [ <i64 0, i64 1, i64 2, i64 3>, [[VECTOR_PH]] ], [ [[VEC_IND_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT:    [[STEP_ADD:%.*]] = add <4 x i64> [[VEC_IND]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT:    [[STEP_ADD6:%.*]] = add <4 x i64> [[STEP_ADD]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT:    [[STEP_ADD7:%.*]] = add <4 x i64> [[STEP_ADD6]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], <4 x i64> [[VEC_IND]], i32 0
+; CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], <4 x i64> [[STEP_ADD]], i32 0
+; CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], <4 x i64> [[STEP_ADD6]], i32 0
+; CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], <4 x i64> [[STEP_ADD7]], i32 0
+; CHECK-NEXT:    call void @llvm.masked.scatter.v4p10sl_s.v4p13p10sl_s(<4 x {} addrspace(10)*> [[BROADCAST_SPLAT]], <4 x {} addrspace(10)* addrspace(13)*> [[TMP21]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa [[TBAA10:![0-9]+]]
+; CHECK-NEXT:    call void @llvm.masked.scatter.v4p10sl_s.v4p13p10sl_s(<4 x {} addrspace(10)*> [[BROADCAST_SPLAT10]], <4 x {} addrspace(10)* addrspace(13)*> [[TMP22]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa [[TBAA10]]
+; CHECK-NEXT:    call void @llvm.masked.scatter.v4p10sl_s.v4p13p10sl_s(<4 x {} addrspace(10)*> [[BROADCAST_SPLAT12]], <4 x {} addrspace(10)* addrspace(13)*> [[TMP23]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa [[TBAA10]]
+; CHECK-NEXT:    call void @llvm.masked.scatter.v4p10sl_s.v4p13p10sl_s(<4 x {} addrspace(10)*> [[BROADCAST_SPLAT14]], <4 x {} addrspace(10)* addrspace(13)*> [[TMP24]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa [[TBAA10]]
+; CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], <4 x i64> [[VEC_IND]], i32 1
+; CHECK-NEXT:    [[TMP26:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], <4 x i64> [[STEP_ADD]], i32 1
+; CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], <4 x i64> [[STEP_ADD6]], i32 1
+; CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], <4 x i64> [[STEP_ADD7]], i32 1
+; CHECK-NEXT:    call void @llvm.masked.scatter.v4i64.v4p13i64(<4 x i64> [[BROADCAST_SPLAT16]], <4 x i64 addrspace(13)*> [[TMP25]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa [[TBAA10]]
+; CHECK-NEXT:    call void @llvm.masked.scatter.v4i64.v4p13i64(<4 x i64> [[BROADCAST_SPLAT18]], <4 x i64 addrspace(13)*> [[TMP26]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa [[TBAA10]]
+; CHECK-NEXT:    call void @llvm.masked.scatter.v4i64.v4p13i64(<4 x i64> [[BROADCAST_SPLAT20]], <4 x i64 addrspace(13)*> [[TMP27]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa [[TBAA10]]
+; CHECK-NEXT:    call void @llvm.masked.scatter.v4i64.v4p13i64(<4 x i64> [[BROADCAST_SPLAT22]], <4 x i64 addrspace(13)*> [[TMP28]], i32 8, <4 x i1> <i1 true, i1 true, i1 true, i1 true>), !tbaa [[TBAA10]]
+; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 16
+; CHECK-NEXT:    [[VEC_IND_NEXT]] = add <4 x i64> [[STEP_ADD7]], <i64 4, i64 4, i64 4, i64 4>
+; CHECK-NEXT:    [[TMP29:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[TMP29]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; CHECK:       middle.block:
+; CHECK-NEXT:    [[CMP_N:%.*]] = icmp eq i64 [[TMP11]], [[N_VEC]]
+; CHECK-NEXT:    br i1 [[CMP_N]], label [[L44:%.*]], label [[SCALAR_PH]]
+; CHECK:       scalar.ph:
+; CHECK-NEXT:    [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[TOP:%.*]] ], [ 0, [[VECTOR_SCEVCHECK]] ]
+; CHECK-NEXT:    br label [[L26:%.*]]
+; CHECK:       L26:
+; CHECK-NEXT:    [[VALUE_PHI5:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[TMP30:%.*]], [[L26]] ]
+; CHECK-NEXT:    [[DOTREPACK:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], i64 [[VALUE_PHI5]], i32 0
+; CHECK-NEXT:    store {} addrspace(10)* [[DOTUNPACK]], {} addrspace(10)* addrspace(13)* [[DOTREPACK]], align 8, !tbaa [[TBAA10]]
+; CHECK-NEXT:    [[DOTREPACK4:%.*]] = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* [[TMP9]], i64 [[VALUE_PHI5]], i32 1
+; CHECK-NEXT:    store i64 [[DOTUNPACK2]], i64 addrspace(13)* [[DOTREPACK4]], align 8, !tbaa [[TBAA10]]
+; CHECK-NEXT:    [[TMP30]] = add i64 [[VALUE_PHI5]], 1
+; CHECK-NEXT:    [[DOTNOT:%.*]] = icmp eq i64 [[VALUE_PHI5]], [[TMP2]]
+; CHECK-NEXT:    br i1 [[DOTNOT]], label [[L44]], label [[L26]], !llvm.loop [[LOOP14:![0-9]+]]
+; CHECK:       L44:
+; CHECK-NEXT:    ret {} addrspace(10)* null
+;
+top:
+  %2 = sext i32 %1 to i64
+  %3 = load atomic {} addrspace(10)* ({} addrspace(10)*, i64)*, {} addrspace(10)* ({} addrspace(10)*, i64)** bitcast (void ()** @jlplt_ijl_alloc_array_1d_10294_got to {} addrspace(10)* ({} addrspace(10)*, i64)**) unordered, align 8
+  %4 = tail call {} addrspace(10)* %3({} addrspace(10)* null, i64 0)
+  %5 = bitcast {} addrspace(10)** %0 to { {} addrspace(10)*, i64 } addrspace(10)**
+  %6 = load { {} addrspace(10)*, i64 } addrspace(10)*, { {} addrspace(10)*, i64 } addrspace(10)** %5, align 8, !tbaa !0
+  %7 = bitcast {} addrspace(10)* %4 to { {} addrspace(10)*, i64 } addrspace(13)* addrspace(10)*
+  %8 = addrspacecast { {} addrspace(10)*, i64 } addrspace(13)* addrspace(10)* %7 to { {} addrspace(10)*, i64 } addrspace(13)* addrspace(11)*
+  %9 = load { {} addrspace(10)*, i64 } addrspace(13)*, { {} addrspace(10)*, i64 } addrspace(13)* addrspace(11)* %8, align 8, !tbaa !5
+  %.elt = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(10)* %6, i64 0, i32 0
+  %.unpack = load {} addrspace(10)*, {} addrspace(10)* addrspace(10)* %.elt, align 8, !tbaa !8
+  %.elt1 = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(10)* %6, i64 0, i32 1
+  %.unpack2 = load i64, i64 addrspace(10)* %.elt1, align 8, !tbaa !8
+  br label %L26
+
+L26:                                              ; preds = %L26, %top
+  %value_phi5 = phi i64 [ 0, %top ], [ %10, %L26 ]
+  %.repack = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* %9, i64 %value_phi5, i32 0
+  store {} addrspace(10)* %.unpack, {} addrspace(10)* addrspace(13)* %.repack, align 8, !tbaa !10
+  %.repack4 = getelementptr inbounds { {} addrspace(10)*, i64 }, { {} addrspace(10)*, i64 } addrspace(13)* %9, i64 %value_phi5, i32 1
+  store i64 %.unpack2, i64 addrspace(13)* %.repack4, align 8, !tbaa !10
+  %10 = add i64 %value_phi5, 1
+  %.not = icmp eq i64 %value_phi5, %2
+  br i1 %.not, label %L44, label %L26
+
+L44:                                              ; preds = %L26
+  ret {} addrspace(10)* null
+}
+
+attributes #0 = { "target-cpu"="skylake-avx512" "target-features"="+xsaves,+xsavec,+prfchw,+lzcnt,+sahf,+pku,+avx512vl,+avx512bw,+avx512cd,+clwb,+clflushopt,+adx,+avx512dq,+avx512f,+bmi2,+avx2,+bmi,+fsgsbase,+f16c,+avx,+xsave,+aes,+popcnt,+movbe,+sse4.2,+sse4.1,+cx16,+fma,+ssse3,+pclmul,+sse3,-rdrnd,-rtm,-rdseed,-avx512ifma,-avx512pf,-avx512er,-sha,-prefetchwt1,-avx512vbmi,-waitpkg,-avx512vbmi2,-shstk,-gfni,-vaes,-vpclmulqdq,-avx512vnni,-avx512bitalg,-avx512vpopcntdq,-rdpid,-cldemote,-movdiri,-movdir64b,-enqcmd,-avx512vp2intersect,-serialize,-tsxldtrk,-pconfig,-amx-bf16,-amx-tile,-amx-int8,-sse4a,-xop,-lwp,-fma4,-tbm,-mwaitx,-xsaveopt,-clzero,-wbnoinvd,-avx512bf16,-ptwrite,+sse2,+mmx,+fxsr,+64bit,+cx8" }
+attributes #1 = { inaccessiblemem_or_argmemonly }
+attributes #2 = { allocsize(1) }
+
+!0 = !{!1, !1, i64 0}
+!1 = !{!"jtbaa_value", !2, i64 0}
+!2 = !{!"jtbaa_data", !3, i64 0}
+!3 = !{!"jtbaa", !4, i64 0}
+!4 = !{!"jtbaa"}
+!5 = !{!6, !6, i64 0}
+!6 = !{!"jtbaa_arrayptr", !7, i64 0}
+!7 = !{!"jtbaa_array", !3, i64 0}
+!8 = !{!9, !9, i64 0}
+!9 = !{!"jtbaa_immut", !1, i64 0}
+!10 = !{!11, !11, i64 0}
+!11 = !{!"jtbaa_arraybuf", !2, i64 0}
