[llvm] [LV] Autovectorization for the all-in-one histogram intrinsic (PR #91458)
Graham Hunter via llvm-commits
llvm-commits at lists.llvm.org
Tue Jun 25 06:10:00 PDT 2024
https://github.com/huntergr-arm updated https://github.com/llvm/llvm-project/pull/91458
>From d3664e4a713e27d4f386cf2c19f46b0ebd29d2cb Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Tue, 11 Jun 2024 14:58:55 +0000
Subject: [PATCH 1/3] Initial tests for histogram autovec
---
.../Analysis/LoopAccessAnalysis/histogram.ll | 41 +++
.../LoopVectorize/AArch64/sve2-histcnt.ll | 275 ++++++++++++++++++
2 files changed, 316 insertions(+)
create mode 100644 llvm/test/Analysis/LoopAccessAnalysis/histogram.ll
create mode 100644 llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/histogram.ll b/llvm/test/Analysis/LoopAccessAnalysis/histogram.ll
new file mode 100644
index 0000000000000..32636c3878b92
--- /dev/null
+++ b/llvm/test/Analysis/LoopAccessAnalysis/histogram.ll
@@ -0,0 +1,41 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py UTC_ARGS: --version 5
+; RUN: opt -disable-output -passes='print<access-info>' %s 2>&1 | FileCheck %s
+
+
+define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %N) {
+; CHECK-LABEL: 'simple_histogram'
+; CHECK-NEXT: for.body:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Unknown:
+; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4 ->
+; CHECK-NEXT: store i32 %inc, ptr %arrayidx2, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %indices, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %idxprom1 = zext i32 %0 to i64
+ %arrayidx2 = getelementptr inbounds i32, ptr %buckets, i64 %idxprom1
+ %1 = load i32, ptr %arrayidx2, align 4
+ %inc = add nsw i32 %1, 1
+ store i32 %inc, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, %N
+ br i1 %exitcond, label %for.exit, label %for.body
+
+for.exit:
+ ret void
+}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
new file mode 100644
index 0000000000000..283baf61b7393
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
@@ -0,0 +1,275 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 3
+; RUN: opt < %s -passes=loop-vectorize -force-vector-interleave=1 -sve-gather-overhead=2 -sve-scatter-overhead=2 -S | FileCheck %s
+
+target triple = "aarch64-unknown-linux-gnu"
+
+;; Based on the following C code:
+;;
+;; void simple_histogram(int *buckets, unsigned *indices, int N) {
+;; for (int i = 0; i < N; ++i)
+;; buckets[indices[i]]++;
+;; }
+
+define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %N) #0 {
+; CHECK-LABEL: define void @simple_histogram(
+; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
+; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK: for.exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %indices, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %idxprom1 = zext i32 %0 to i64
+ %arrayidx2 = getelementptr inbounds i32, ptr %buckets, i64 %idxprom1
+ %1 = load i32, ptr %arrayidx2, align 4
+ %inc = add nsw i32 %1, 1
+ store i32 %inc, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, %N
+ br i1 %exitcond, label %for.exit, label %for.body
+
+for.exit:
+ ret void
+}
+
+define void @simple_histogram_sub(ptr noalias %buckets, ptr readonly %indices, i64 %N) #0 {
+; CHECK-LABEL: define void @simple_histogram_sub(
+; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[INC:%.*]] = sub nsw i32 [[TMP1]], 1
+; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK: for.exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %indices, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %idxprom1 = zext i32 %0 to i64
+ %arrayidx2 = getelementptr inbounds i32, ptr %buckets, i64 %idxprom1
+ %1 = load i32, ptr %arrayidx2, align 4
+ %inc = sub nsw i32 %1, 1
+ store i32 %inc, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, %N
+ br i1 %exitcond, label %for.exit, label %for.body
+
+for.exit:
+ ret void
+}
+
+define void @conditional_histogram(ptr noalias %buckets, ptr readonly %indices, ptr readonly %conds, i64 %N) #0 {
+; CHECK-LABEL: define void @conditional_histogram(
+; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], ptr readonly [[CONDS:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[NEXT:%.*]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[CONDIDX:%.*]] = getelementptr inbounds i32, ptr [[CONDS]], i64 [[IV]]
+; CHECK-NEXT: [[CONDDATA:%.*]] = load i32, ptr [[CONDIDX]], align 4
+; CHECK-NEXT: [[IFCOND:%.*]] = icmp sgt i32 [[CONDDATA]], 5100
+; CHECK-NEXT: br i1 [[IFCOND]], label [[IFTRUE:%.*]], label [[NEXT]]
+; CHECK: iftrue:
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
+; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: br label [[NEXT]]
+; CHECK: next:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK: for.exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %next ]
+ %arrayidx = getelementptr inbounds i32, ptr %indices, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %idxprom1 = zext i32 %0 to i64
+ %arrayidx2 = getelementptr inbounds i32, ptr %buckets, i64 %idxprom1
+ %condidx = getelementptr inbounds i32, ptr %conds, i64 %iv
+ %conddata = load i32, ptr %condidx, align 4
+ %ifcond = icmp sgt i32 %conddata, 5100
+ br i1 %ifcond, label %iftrue, label %next
+
+iftrue:
+ %1 = load i32, ptr %arrayidx2, align 4
+ %inc = add nsw i32 %1, 1
+ store i32 %inc, ptr %arrayidx2, align 4
+ br label %next
+
+next:
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, %N
+ br i1 %exitcond, label %for.exit, label %for.body
+
+for.exit:
+ ret void
+}
+
+;; Need to support legalization of smaller int types.
+define void @histogram_8bit(ptr noalias %buckets, ptr readonly %indices, i64 %N) #0 {
+; CHECK-LABEL: define void @histogram_8bit(
+; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i8, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i8, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[INC:%.*]] = add nsw i8 [[TMP1]], 1
+; CHECK-NEXT: store i8 [[INC]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK: for.exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %indices, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %idxprom1 = zext i32 %0 to i64
+ %arrayidx2 = getelementptr inbounds i8, ptr %buckets, i64 %idxprom1
+ %1 = load i8, ptr %arrayidx2, align 4
+ %inc = add nsw i8 %1, 1
+ store i8 %inc, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, %N
+ br i1 %exitcond, label %for.exit, label %for.body
+
+for.exit:
+ ret void
+}
+
+;; We don't currently support floating point histograms.
+define void @histogram_float(ptr noalias %buckets, ptr readonly %indices, i64 %N) #0 {
+; CHECK-LABEL: define void @histogram_float(
+; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds float, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load float, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[INC:%.*]] = fadd fast float [[TMP1]], 1.000000e+00
+; CHECK-NEXT: store float [[INC]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK: for.exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %indices, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %idxprom1 = zext i32 %0 to i64
+ %arrayidx2 = getelementptr inbounds float, ptr %buckets, i64 %idxprom1
+ %1 = load float, ptr %arrayidx2, align 4
+ %inc = fadd fast float %1, 1.0
+ store float %inc, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, %N
+ br i1 %exitcond, label %for.exit, label %for.body
+
+for.exit:
+ ret void
+}
+
+define void @histogram_varying_increment(ptr noalias %buckets, ptr readonly %indices, ptr readonly %incvals, i64 %N) #0 {
+; CHECK-LABEL: define void @histogram_varying_increment(
+; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], ptr readonly [[INCVALS:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: br label [[FOR_BODY:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
+; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[INCIDX:%.*]] = getelementptr inbounds i32, ptr [[INCVALS]], i64 [[IV]]
+; CHECK-NEXT: [[INCVAL:%.*]] = load i32, ptr [[INCIDX]], align 4
+; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], [[INCVAL]]
+; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK: for.exit:
+; CHECK-NEXT: ret void
+;
+entry:
+ br label %for.body
+
+for.body:
+ %iv = phi i64 [ 0, %entry ], [ %iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %indices, i64 %iv
+ %0 = load i32, ptr %arrayidx, align 4
+ %idxprom1 = zext i32 %0 to i64
+ %arrayidx2 = getelementptr inbounds i32, ptr %buckets, i64 %idxprom1
+ %1 = load i32, ptr %arrayidx2, align 4
+ %incidx = getelementptr inbounds i32, ptr %incvals, i64 %iv
+ %incval = load i32, ptr %incidx, align 4
+ %inc = add nsw i32 %1, %incval
+ store i32 %inc, ptr %arrayidx2, align 4
+ %iv.next = add nuw nsw i64 %iv, 1
+ %exitcond = icmp eq i64 %iv.next, %N
+ br i1 %exitcond, label %for.exit, label %for.body
+
+for.exit:
+ ret void
+}
+
+attributes #0 = { "target-features"="+sve2" vscale_range(1,16) }
>From ed4524bc4487ce058a4968e1d2f2c9059fb73193 Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Thu, 25 Apr 2024 14:54:28 +0100
Subject: [PATCH 2/3] [LV] Vectorize simple histograms
This patch introduces the ability to autovectorize loops containing
a histogram operation; that is,
* load from non-contiguous, possibly overlapping addresses
* update with a loop-invariant value
* store back to the same addresses
An example:
void simple_histogram(int *restrict buckets, unsigned *indices, int N) {
for (int i = 0; i < N; ++i)
buckets[indices[i]]++;
}
For this initial variant we're fairly conservative: we don't allow additional
uses of the loaded bucket values, and only support integer add/sub updates.
This uses the recently committed histogram intrinsic.
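To illustrate the shape of the generated code (a condensed sketch based on the
CHECK lines in the tests below; value names are illustrative), the scalar
bucket update

  %idx = load i32, ptr %gep.indices
  %ext = zext i32 %idx to i64
  %gep.bucket = getelementptr inbounds i32, ptr %buckets, i64 %ext
  %old = load i32, ptr %gep.bucket
  %inc = add nsw i32 %old, 1
  store i32 %inc, ptr %gep.bucket

becomes a single masked call on a vector of bucket addresses in the vector
body:

  %wide.idx = load <vscale x 4 x i32>, ptr %gep.indices
  %wide.ext = zext <vscale x 4 x i32> %wide.idx to <vscale x 4 x i64>
  %gep.buckets = getelementptr inbounds i32, ptr %buckets, <vscale x 4 x i64> %wide.ext
  call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> %gep.buckets, i32 1, <vscale x 4 x i1> %mask)

where %mask is all-true for an unpredicated loop. The intrinsic takes
responsibility for lanes with matching bucket addresses, which is what allows
LAA to classify the load/store pair as a safe 'Histogram' dependence rather
than an unknown one.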
---
.../llvm/Analysis/LoopAccessAnalysis.h | 33 ++++-
.../llvm/Analysis/TargetTransformInfo.h | 8 +
.../llvm/Analysis/TargetTransformInfoImpl.h | 4 +
llvm/include/llvm/CodeGen/BasicTTIImpl.h | 4 +
.../Vectorize/LoopVectorizationLegality.h | 17 +++
llvm/lib/Analysis/LoopAccessAnalysis.cpp | 138 ++++++++++++++++--
llvm/lib/Analysis/TargetTransformInfo.cpp | 4 +
.../AArch64/AArch64TargetTransformInfo.cpp | 31 ++++
.../AArch64/AArch64TargetTransformInfo.h | 2 +
.../Transforms/Scalar/LoopLoadElimination.cpp | 1 +
.../Transforms/Vectorize/LoopVectorize.cpp | 57 +++++++-
.../Transforms/Vectorize/VPRecipeBuilder.h | 6 +
llvm/lib/Transforms/Vectorize/VPlan.h | 30 ++++
.../lib/Transforms/Vectorize/VPlanRecipes.cpp | 37 +++++
llvm/lib/Transforms/Vectorize/VPlanValue.h | 1 +
.../Analysis/LoopAccessAnalysis/histogram.ll | 5 +-
.../LoopVectorize/AArch64/sve2-histcnt.ll | 127 +++++++++++++---
17 files changed, 465 insertions(+), 40 deletions(-)
diff --git a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
index 7a54fe55014be..177ab165bc787 100644
--- a/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
+++ b/llvm/include/llvm/Analysis/LoopAccessAnalysis.h
@@ -144,7 +144,9 @@ class MemoryDepChecker {
// on MinDepDistBytes.
BackwardVectorizable,
// Same, but may prevent store-to-load forwarding.
- BackwardVectorizableButPreventsForwarding
+ BackwardVectorizableButPreventsForwarding,
+ // Access is to a loop loaded value, but is part of a histogram operation.
+ Histogram
};
/// String version of the types.
@@ -201,7 +203,8 @@ class MemoryDepChecker {
/// Only checks sets with elements in \p CheckDeps.
bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
const DenseMap<Value *, SmallVector<const Value *, 16>>
- &UnderlyingObjects);
+ &UnderlyingObjects,
+ const SmallPtrSetImpl<const Value *> &HistogramPtrs);
/// No memory dependence was encountered that would inhibit
/// vectorization.
@@ -343,7 +346,8 @@ class MemoryDepChecker {
isDependent(const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
unsigned BIdx,
const DenseMap<Value *, SmallVector<const Value *, 16>>
- &UnderlyingObjects);
+ &UnderlyingObjects,
+ const SmallPtrSetImpl<const Value *> &HistogramPtrs);
/// Check whether the data dependence could prevent store-load
/// forwarding.
@@ -384,7 +388,8 @@ class MemoryDepChecker {
const MemAccessInfo &A, Instruction *AInst, const MemAccessInfo &B,
Instruction *BInst,
const DenseMap<Value *, SmallVector<const Value *, 16>>
- &UnderlyingObjects);
+ &UnderlyingObjects,
+ const SmallPtrSetImpl<const Value *> &HistogramPtrs);
};
class RuntimePointerChecking;
@@ -436,6 +441,15 @@ struct PointerDiffInfo {
NeedsFreeze(NeedsFreeze) {}
};
+struct HistogramInfo {
+ Instruction *Load;
+ Instruction *Update;
+ Instruction *Store;
+
+ HistogramInfo(Instruction *Load, Instruction *Update, Instruction *Store)
+ : Load(Load), Update(Update), Store(Store) {}
+};
+
/// Holds information about the memory runtime legality checks to verify
/// that a group of pointers do not overlap.
class RuntimePointerChecking {
@@ -655,6 +669,10 @@ class LoopAccessInfo {
unsigned getNumStores() const { return NumStores; }
unsigned getNumLoads() const { return NumLoads;}
+ const SmallVectorImpl<HistogramInfo> &getHistograms() const {
+ return Histograms;
+ }
+
/// The diagnostics report generated for the analysis. E.g. why we
/// couldn't analyze the loop.
const OptimizationRemarkAnalysis *getReport() const { return Report.get(); }
@@ -768,6 +786,13 @@ class LoopAccessInfo {
/// If an access has a symbolic strides, this maps the pointer value to
/// the stride symbol.
DenseMap<Value *, const SCEV *> SymbolicStrides;
+
+ /// Holds the load, update, and store instructions for all histogram-style
+ /// operations found in the loop.
+ SmallVector<HistogramInfo, 2> Histograms;
+
+  /// Holds the pointers used to update the histogram buckets.
+ SmallPtrSet<const Value *, 2> HistogramPtrs;
};
/// Return the SCEV corresponding to a pointer with the symbolic stride
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfo.h b/llvm/include/llvm/Analysis/TargetTransformInfo.h
index f55f21c94a85a..d056016248826 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfo.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfo.h
@@ -989,6 +989,9 @@ class TargetTransformInfo {
/// Return hardware support for population count.
PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
+ /// Returns the cost of generating a vector histogram.
+ InstructionCost getHistogramCost(Type *Ty) const;
+
/// Return true if the hardware has a fast square-root instruction.
bool haveFastSqrt(Type *Ty) const;
@@ -1939,6 +1942,7 @@ class TargetTransformInfo::Concept {
unsigned *Fast) = 0;
virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
virtual bool haveFastSqrt(Type *Ty) = 0;
+ virtual InstructionCost getHistogramCost(Type *Ty) = 0;
virtual bool isExpensiveToSpeculativelyExecute(const Instruction *I) = 0;
virtual bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) = 0;
virtual InstructionCost getFPOpCost(Type *Ty) = 0;
@@ -2505,6 +2509,10 @@ class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
}
bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }
+ InstructionCost getHistogramCost(Type *Ty) override {
+ return Impl.getHistogramCost(Ty);
+ }
+
bool isExpensiveToSpeculativelyExecute(const Instruction* I) override {
return Impl.isExpensiveToSpeculativelyExecute(I);
}
diff --git a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
index 7828bdc1f1f43..5914343328da8 100644
--- a/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/llvm/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -420,6 +420,10 @@ class TargetTransformInfoImplBase {
bool haveFastSqrt(Type *Ty) const { return false; }
+ InstructionCost getHistogramCost(Type *Ty) const {
+ return InstructionCost::getInvalid();
+ }
+
bool isExpensiveToSpeculativelyExecute(const Instruction *I) { return true; }
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) const { return true; }
diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 9f8d3ded9b3c1..9eea1b91843b4 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -544,6 +544,10 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}
+ InstructionCost getHistogramCost(Type *Ty) {
+ return InstructionCost::getInvalid();
+ }
+
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) {
return true;
}
diff --git a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
index a509ebf6a7e1b..3ac73232c0f7b 100644
--- a/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
+++ b/llvm/include/llvm/Transforms/Vectorize/LoopVectorizationLegality.h
@@ -387,6 +387,23 @@ class LoopVectorizationLegality {
unsigned getNumStores() const { return LAI->getNumStores(); }
unsigned getNumLoads() const { return LAI->getNumLoads(); }
+ bool isHistogramLoadOrUpdate(Instruction *I) const {
+ for (const HistogramInfo &HGram : LAI->getHistograms())
+ if (HGram.Load == I || HGram.Update == I)
+ return true;
+
+ return false;
+ }
+
+ std::optional<const HistogramInfo *>
+ getHistogramForStore(StoreInst *SI) const {
+ for (const HistogramInfo &HGram : LAI->getHistograms())
+ if (HGram.Store == SI)
+ return &HGram;
+
+ return std::nullopt;
+ }
+
PredicatedScalarEvolution *getPredicatedScalarEvolution() const {
return &PSE;
}
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 5cc6ce4c90054..b8d9dc19bae2f 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
@@ -70,6 +71,8 @@ using namespace llvm::PatternMatch;
#define DEBUG_TYPE "loop-accesses"
+STATISTIC(HistogramsDetected, "Number of Histograms detected");
+
static cl::opt<unsigned, true>
VectorizationFactor("force-vector-width", cl::Hidden,
cl::desc("Sets the SIMD width. Zero is autoselect."),
@@ -727,6 +730,23 @@ class AccessAnalysis {
return UnderlyingObjects;
}
+ /// Find Histogram counts that match high-level code in loops:
+ /// \code
+ /// buckets[indices[i]]+=step;
+ /// \endcode
+ ///
+  /// The pattern starts at \p HSt, which stores an updated count back to the
+  /// 'buckets' array. The count is produced by a BinOp on the loaded bucket
+  /// value, addressed via a loop-variant index Load from the 'indices' array.
+ ///
+ /// On successful matches it updates the STATISTIC 'HistogramsDetected',
+ /// regardless of hardware support. When there is support, it additionally
+  /// stores the BinOp/Load pairs in \p Histograms, as well as the pointers
+  /// used to update the histogram in \p HistogramPtrs.
+ void findHistograms(StoreInst *HSt, Loop *TheLoop,
+ SmallVectorImpl<HistogramInfo> &Histograms,
+ SmallPtrSetImpl<const Value *> &HistogramPtrs);
+
private:
typedef MapVector<MemAccessInfo, SmallSetVector<Type *, 1>> PtrAccessMap;
@@ -1693,6 +1713,7 @@ MemoryDepChecker::Dependence::isSafeForVectorization(DepType Type) {
case NoDep:
case Forward:
case BackwardVectorizable:
+ case Histogram:
return VectorizationSafetyStatus::Safe;
case Unknown:
@@ -1713,6 +1734,7 @@ bool MemoryDepChecker::Dependence::isBackward() const {
case ForwardButPreventsForwarding:
case Unknown:
case IndirectUnsafe:
+ case Histogram:
return false;
case BackwardVectorizable:
@@ -1724,7 +1746,7 @@ bool MemoryDepChecker::Dependence::isBackward() const {
}
bool MemoryDepChecker::Dependence::isPossiblyBackward() const {
- return isBackward() || Type == Unknown;
+ return isBackward() || Type == Unknown || Type == Histogram;
}
bool MemoryDepChecker::Dependence::isForward() const {
@@ -1739,6 +1761,7 @@ bool MemoryDepChecker::Dependence::isForward() const {
case Backward:
case BackwardVectorizableButPreventsForwarding:
case IndirectUnsafe:
+ case Histogram:
return false;
}
llvm_unreachable("unexpected DepType!");
@@ -1908,8 +1931,8 @@ std::variant<MemoryDepChecker::Dependence::DepType,
MemoryDepChecker::getDependenceDistanceStrideAndSize(
const AccessAnalysis::MemAccessInfo &A, Instruction *AInst,
const AccessAnalysis::MemAccessInfo &B, Instruction *BInst,
- const DenseMap<Value *, SmallVector<const Value *, 16>>
- &UnderlyingObjects) {
+ const DenseMap<Value *, SmallVector<const Value *, 16>> &UnderlyingObjects,
+ const SmallPtrSetImpl<const Value *> &HistogramPtrs) {
auto &DL = InnermostLoop->getHeader()->getModule()->getDataLayout();
auto &SE = *PSE.getSE();
auto [APtr, AIsWrite] = A;
@@ -1927,6 +1950,12 @@ MemoryDepChecker::getDependenceDistanceStrideAndSize(
BPtr->getType()->getPointerAddressSpace())
return MemoryDepChecker::Dependence::Unknown;
+  // Ignore Histogram count updates as they are handled by the Intrinsic. This
+  // happens when the same pointer is first used to read the current count and
+  // then used to write the updated count.
+ if (!AIsWrite && BIsWrite && APtr == BPtr && HistogramPtrs.contains(APtr))
+ return MemoryDepChecker::Dependence::Histogram;
+
int64_t StrideAPtr =
getPtrStride(PSE, ATy, APtr, InnermostLoop, SymbolicStrides, true)
.value_or(0);
@@ -2004,14 +2033,14 @@ MemoryDepChecker::getDependenceDistanceStrideAndSize(
MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
const MemAccessInfo &A, unsigned AIdx, const MemAccessInfo &B,
unsigned BIdx,
- const DenseMap<Value *, SmallVector<const Value *, 16>>
- &UnderlyingObjects) {
+ const DenseMap<Value *, SmallVector<const Value *, 16>> &UnderlyingObjects,
+ const SmallPtrSetImpl<const Value *> &HistogramPtrs) {
assert(AIdx < BIdx && "Must pass arguments in program order");
// Get the dependence distance, stride, type size and what access writes for
// the dependence between A and B.
auto Res = getDependenceDistanceStrideAndSize(
- A, InstMap[AIdx], B, InstMap[BIdx], UnderlyingObjects);
+ A, InstMap[AIdx], B, InstMap[BIdx], UnderlyingObjects, HistogramPtrs);
if (std::holds_alternative<Dependence::DepType>(Res))
return std::get<Dependence::DepType>(Res);
@@ -2247,8 +2276,8 @@ MemoryDepChecker::Dependence::DepType MemoryDepChecker::isDependent(
bool MemoryDepChecker::areDepsSafe(
DepCandidates &AccessSets, MemAccessInfoList &CheckDeps,
- const DenseMap<Value *, SmallVector<const Value *, 16>>
- &UnderlyingObjects) {
+ const DenseMap<Value *, SmallVector<const Value *, 16>> &UnderlyingObjects,
+ const SmallPtrSetImpl<const Value *> &HistogramPtrs) {
MinDepDistBytes = -1;
SmallPtrSet<MemAccessInfo, 8> Visited;
@@ -2291,8 +2320,9 @@ bool MemoryDepChecker::areDepsSafe(
if (*I1 > *I2)
std::swap(A, B);
- Dependence::DepType Type = isDependent(*A.first, A.second, *B.first,
- B.second, UnderlyingObjects);
+ Dependence::DepType Type =
+ isDependent(*A.first, A.second, *B.first, B.second,
+ UnderlyingObjects, HistogramPtrs);
mergeInStatus(Dependence::isSafeForVectorization(Type));
// Gather dependences unless we accumulated MaxDependences
@@ -2343,7 +2373,8 @@ const char *MemoryDepChecker::Dependence::DepName[] = {
"ForwardButPreventsForwarding",
"Backward",
"BackwardVectorizable",
- "BackwardVectorizableButPreventsForwarding"};
+ "BackwardVectorizableButPreventsForwarding",
+ "Histogram"};
void MemoryDepChecker::Dependence::print(
raw_ostream &OS, unsigned Depth,
@@ -2623,6 +2654,9 @@ bool LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
// check.
Accesses.buildDependenceSets();
+ for (StoreInst *ST : Stores)
+ Accesses.findHistograms(ST, TheLoop, Histograms, HistogramPtrs);
+
// Find pointers with computable bounds. We are going to use this information
// to place a runtime bound check.
Value *UncomputablePtr = nullptr;
@@ -2644,9 +2678,9 @@ bool LoopAccessInfo::analyzeLoop(AAResults *AA, LoopInfo *LI,
bool DepsAreSafe = true;
if (Accesses.isDependencyCheckNeeded()) {
LLVM_DEBUG(dbgs() << "LAA: Checking memory dependencies\n");
- DepsAreSafe = DepChecker->areDepsSafe(DependentAccesses,
- Accesses.getDependenciesToCheck(),
- Accesses.getUnderlyingObjects());
+ DepsAreSafe = DepChecker->areDepsSafe(
+ DependentAccesses, Accesses.getDependenciesToCheck(),
+ Accesses.getUnderlyingObjects(), HistogramPtrs);
if (!DepsAreSafe && DepChecker->shouldRetryWithRuntimeCheck()) {
LLVM_DEBUG(dbgs() << "LAA: Retrying with memory checks\n");
@@ -2751,6 +2785,9 @@ void LoopAccessInfo::emitUnsafeDependenceRemark() {
case MemoryDepChecker::Dependence::Unknown:
R << "\nUnknown data dependence.";
break;
+ case MemoryDepChecker::Dependence::Histogram:
+ R << "\nHistogram data dependence.";
+ break;
}
if (Instruction *I = Dep.getSource(getDepChecker())) {
@@ -3079,6 +3116,79 @@ const LoopAccessInfo &LoopAccessInfoManager::getInfo(Loop &L) {
return *It->second;
}
+void AccessAnalysis::findHistograms(
+ StoreInst *HSt, Loop *TheLoop, SmallVectorImpl<HistogramInfo> &Histograms,
+ SmallPtrSetImpl<const Value *> &HistogramPtrs) {
+
+ // Store value must come from a Binary Operation.
+ Instruction *HPtrInstr = nullptr;
+ BinaryOperator *HBinOp = nullptr;
+ if (!match(HSt, m_Store(m_BinOp(HBinOp), m_Instruction(HPtrInstr))))
+ return;
+
+ // BinOp must be an Add or a Sub modifying the bucket value by a
+ // loop invariant amount.
+ // FIXME: We assume the loop invariant term is on the RHS.
+ // Fine for an immediate/constant, but maybe not a generic value?
+ Value *HIncVal = nullptr;
+ if (!match(HBinOp, m_Add(m_Load(m_Specific(HPtrInstr)), m_Value(HIncVal))) &&
+ !match(HBinOp, m_Sub(m_Load(m_Specific(HPtrInstr)), m_Value(HIncVal))))
+ return;
+
+ // Make sure the increment value is loop invariant.
+ if (!TheLoop->isLoopInvariant(HIncVal))
+ return;
+
+ // The address to store is calculated through a GEP Instruction.
+ // FIXME: Support GEPs with more operands.
+ GetElementPtrInst *HPtr = dyn_cast<GetElementPtrInst>(HPtrInstr);
+ if (!HPtr || HPtr->getNumOperands() > 2)
+ return;
+
+ // Check that the index is calculated by loading from another array. Ignore
+ // any extensions.
+  // FIXME: Support indices from sources other than a linear load from memory?
+ Value *HIdx = HPtr->getOperand(1);
+ Instruction *IdxInst = nullptr;
+ if (!match(HIdx, m_ZExtOrSExtOrSelf(m_Instruction(IdxInst))))
+ return;
+
+ // Currently restricting this to linear addressing when loading indices.
+ LoadInst *VLoad = dyn_cast<LoadInst>(IdxInst);
+ Value *VPtrVal;
+ if (!VLoad || !match(VLoad, m_Load(m_Value(VPtrVal))))
+ return;
+
+ if (!isa<SCEVAddRecExpr>(PSE.getSCEV(VPtrVal)))
+ return;
+
+ // Ensure we'll have the same mask by checking that all parts of the histogram
+ // (gather load, update, scatter store) are in the same block.
+ Instruction *IndexedLoad = cast<Instruction>(HBinOp->getOperand(0));
+ BasicBlock *LdBB = IndexedLoad->getParent();
+ if (LdBB != HBinOp->getParent() || LdBB != HSt->getParent())
+ return;
+
+  // A histogram pointer may only alias itself, and must have exactly two
+  // uses: the load and the store.
+  // We may be able to relax these constraints later.
+ for (AliasSet &AS : AST)
+ if (AS.isMustAlias() || AS.isMayAlias())
+ if ((is_contained(AS.getPointers(), HPtr) && AS.size() > 1) ||
+ HPtr->getNumUses() != 2)
+ return;
+
+ HistogramsDetected++;
+
+ LLVM_DEBUG(dbgs() << "LAA: Found histogram for load: " << *IndexedLoad
+ << " and store: " << *HSt << "\n");
+
+ // Store the operations that make up the histogram.
+ Histograms.emplace_back(IndexedLoad, HBinOp, HSt);
+ // Store pointers used to write those counts in the computed histogram.
+ HistogramPtrs.insert(HPtr);
+}
+
bool LoopAccessInfoManager::invalidate(
Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &Inv) {
diff --git a/llvm/lib/Analysis/TargetTransformInfo.cpp b/llvm/lib/Analysis/TargetTransformInfo.cpp
index 7e721cbc87f3f..a49d60f156149 100644
--- a/llvm/lib/Analysis/TargetTransformInfo.cpp
+++ b/llvm/lib/Analysis/TargetTransformInfo.cpp
@@ -662,6 +662,10 @@ bool TargetTransformInfo::haveFastSqrt(Type *Ty) const {
return TTIImpl->haveFastSqrt(Ty);
}
+InstructionCost TargetTransformInfo::getHistogramCost(Type *Ty) const {
+ return TTIImpl->getHistogramCost(Ty);
+}
+
bool TargetTransformInfo::isExpensiveToSpeculativelyExecute(
const Instruction *I) const {
return TTIImpl->isExpensiveToSpeculativelyExecute(I);
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index 0f5d80a225d86..ec6e902eb913e 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -61,6 +61,11 @@ static cl::opt<bool> EnableOrLikeSelectOpt("enable-aarch64-or-like-select",
static cl::opt<bool> EnableLSRCostOpt("enable-aarch64-lsr-cost-opt",
cl::init(true), cl::Hidden);
+// A complete guess as to a reasonable cost.
+static cl::opt<unsigned>
+ BaseHistCntCost("aarch64-base-histcnt-cost", cl::init(8), cl::Hidden,
+ cl::desc("The cost of a histcnt instruction"));
+
namespace {
class TailFoldingOption {
// These bitfields will only ever be set to something non-zero in operator=,
@@ -508,6 +513,32 @@ static bool isUnpackedVectorVT(EVT VecVT) {
VecVT.getSizeInBits().getKnownMinValue() < AArch64::SVEBitsPerBlock;
}
+InstructionCost AArch64TTIImpl::getHistogramCost(Type *Ty) const {
+ if (!ST->hasSVE2orSME())
+ return InstructionCost::getInvalid();
+
+ Type *EltTy = Ty->getScalarType();
+
+ // Only allow (32b and 64b) integers or pointers for now...
+ if ((!EltTy->isIntegerTy() && !EltTy->isPointerTy()) ||
+ (EltTy->getScalarSizeInBits() != 32 &&
+ EltTy->getScalarSizeInBits() != 64))
+ return InstructionCost::getInvalid();
+
+ // FIXME: Hacky check for legal vector types. We can promote smaller types
+ // but we cannot legalize vectors via splitting for histcnt.
+ // FIXME: We should be able to generate histcnt for fixed-length vectors
+ // using ptrue with a specific VL.
+ if (VectorType *VTy = dyn_cast<VectorType>(Ty))
+ if ((VTy->getElementCount().getKnownMinValue() != 2 &&
+ VTy->getElementCount().getKnownMinValue() != 4) ||
+ VTy->getPrimitiveSizeInBits().getKnownMinValue() > 128 ||
+ !VTy->isScalableTy())
+ return InstructionCost::getInvalid();
+
+ return InstructionCost(BaseHistCntCost);
+}
+
InstructionCost
AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind) {
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 417e72da9ca10..99638c46ce48a 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -118,6 +118,8 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
return 31;
}
+ InstructionCost getHistogramCost(Type *Ty) const;
+
InstructionCost getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
TTI::TargetCostKind CostKind);
diff --git a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
index 38aea1371e1e1..4ad5339caedb3 100644
--- a/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/LoopLoadElimination.cpp
@@ -199,6 +199,7 @@ class LoadEliminationForLoop {
Instruction *Destination = Dep.getDestination(DepChecker);
if (Dep.Type == MemoryDepChecker::Dependence::Unknown ||
+ Dep.Type == MemoryDepChecker::Dependence::Histogram ||
Dep.Type == MemoryDepChecker::Dependence::IndirectUnsafe) {
if (isa<LoadInst>(Source))
LoadsWithUnknownDepedence.insert(Source);
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index 7835836d21ef1..e0f509c6b245f 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5224,6 +5224,11 @@ LoopVectorizationCostModel::selectInterleaveCount(ElementCount VF,
if (!Legal->isSafeForAnyVectorWidth())
return 1;
+ if (!Legal->getLAI()->getHistograms().empty()) {
+ LLVM_DEBUG(dbgs() << "LV: Not interleaving histogram operations.\n");
+ return 1;
+ }
+
auto BestKnownTC = getSmallBestKnownTC(*PSE.getSE(), TheLoop);
const bool HasReductions = !Legal->getReductionVars().empty();
@@ -6850,8 +6855,19 @@ LoopVectorizationCostModel::getInstructionCost(Instruction *I, ElementCount VF,
// We've proven all lanes safe to speculate, fall through.
[[fallthrough]];
case Instruction::Add:
- case Instruction::FAdd:
case Instruction::Sub:
+ if (Legal->isHistogramLoadOrUpdate(I) && VF.isVector()) {
+ // Assume that a non-constant update value (or a constant != 1) requires
+ // a multiply, and add that into the cost.
+ InstructionCost MulCost = TTI::TCC_Free;
+ ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1));
+ if (!RHS || RHS->getZExtValue() > 1)
+ MulCost = TTI.getArithmeticInstrCost(Instruction::Mul, VectorTy);
+ return TTI.getHistogramCost(VectorTy) + MulCost +
+ TTI.getArithmeticInstrCost(I->getOpcode(), VectorTy);
+ }
+ [[fallthrough]];
+ case Instruction::FAdd:
case Instruction::FSub:
case Instruction::Mul:
case Instruction::FMul:
@@ -8327,6 +8343,36 @@ VPWidenRecipe *VPRecipeBuilder::tryToWiden(Instruction *I,
};
}
+VPHistogramRecipe *
+VPRecipeBuilder::tryToWidenHistogram(const HistogramInfo *HI,
+ ArrayRef<VPValue *> Operands) {
+ // FIXME: Support other operations.
+ unsigned Opcode = HI->Update->getOpcode();
+ assert((Opcode == Instruction::Add || Opcode == Instruction::Sub) &&
+ "Histogram update operation must be an Add or Sub");
+
+ SmallVector<VPValue *, 3> HGramOps;
+ // Bucket address.
+ HGramOps.push_back(Operands[1]);
+ // Increment value.
+ HGramOps.push_back(getVPValueOrAddLiveIn(HI->Update->getOperand(1), Plan));
+
+ // In case of predicated execution (due to tail-folding, or conditional
+ // execution, or both), pass the relevant mask. When there is no such mask,
+ // generate an all-true mask.
+ VPValue *Mask = nullptr;
+ if (Legal->isMaskRequired(HI->Store))
+ Mask = getBlockInMask(HI->Store->getParent());
+ else
+ Mask = Plan.getOrAddLiveIn(
+ ConstantInt::getTrue(IntegerType::getInt1Ty(HI->Load->getContext())));
+ HGramOps.push_back(Mask);
+
+ return new VPHistogramRecipe(Opcode,
+ make_range(HGramOps.begin(), HGramOps.end()),
+ HI->Store->getDebugLoc());
+}
+
void VPRecipeBuilder::fixHeaderPhis() {
BasicBlock *OrigLatch = OrigLoop->getLoopLatch();
for (VPHeaderPHIRecipe *R : PhisToFix) {
@@ -8444,6 +8490,10 @@ VPRecipeBuilder::tryToCreateWidenRecipe(Instruction *Instr,
if (auto *CI = dyn_cast<CallInst>(Instr))
return tryToWidenCall(CI, Operands, Range);
+ if (StoreInst *SI = dyn_cast<StoreInst>(Instr))
+ if (auto HistInfo = Legal->getHistogramForStore(SI))
+ return tryToWidenHistogram(*HistInfo, Operands);
+
if (isa<LoadInst>(Instr) || isa<StoreInst>(Instr))
return tryToWidenMemory(Instr, Operands, Range);
@@ -8646,6 +8696,11 @@ LoopVectorizationPlanner::tryToBuildVPlanWithVPRecipes(VFRange &Range) {
Operands = {OpRange.begin(), OpRange.end()};
}
+    // If this is a load instruction or a binop associated with a histogram,
+    // skip it; a combined intrinsic is emitted when we reach the store.
+ if (Legal->isHistogramLoadOrUpdate(Instr))
+ continue;
+
// Invariant stores inside loop will be deleted and a single store
// with the final reduction value will be added to the exit block
StoreInst *SI;
diff --git a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
index b4c7ab02f928f..2e0139da4668a 100644
--- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -102,6 +102,12 @@ class VPRecipeBuilder {
VPWidenRecipe *tryToWiden(Instruction *I, ArrayRef<VPValue *> Operands,
VPBasicBlock *VPBB);
+ /// Makes Histogram count operations safe for vectorization, by emitting a
+ /// Histogram LLVM Intrinsic before the BinOp (Add/Sub) that does the actual
+ /// counting.
+ VPHistogramRecipe *tryToWidenHistogram(const HistogramInfo *HI,
+ ArrayRef<VPValue *> Operands);
+
public:
VPRecipeBuilder(VPlan &Plan, Loop *OrigLoop, const TargetLibraryInfo *TLI,
LoopVectorizationLegality *Legal,
diff --git a/llvm/lib/Transforms/Vectorize/VPlan.h b/llvm/lib/Transforms/Vectorize/VPlan.h
index db10c7a240c7e..2fbe3d4c44f55 100644
--- a/llvm/lib/Transforms/Vectorize/VPlan.h
+++ b/llvm/lib/Transforms/Vectorize/VPlan.h
@@ -888,6 +888,7 @@ class VPSingleDefRecipe : public VPRecipeBase, public VPValue {
case VPRecipeBase::VPWidenLoadSC:
case VPRecipeBase::VPWidenStoreEVLSC:
case VPRecipeBase::VPWidenStoreSC:
+ case VPRecipeBase::VPHistogramSC:
// TODO: Widened stores don't define a value, but widened loads do. Split
// the recipes to be able to make widened loads VPSingleDefRecipes.
return false;
@@ -1517,6 +1518,35 @@ class VPWidenCallRecipe : public VPSingleDefRecipe {
#endif
};
+class VPHistogramRecipe : public VPRecipeBase {
+ unsigned Opcode;
+
+public:
+ template <typename IterT>
+ VPHistogramRecipe(unsigned Opcode, iterator_range<IterT> Operands,
+ DebugLoc DL = {})
+ : VPRecipeBase(VPDef::VPHistogramSC, Operands, DL), Opcode(Opcode) {}
+
+ ~VPHistogramRecipe() override = default;
+
+ VPHistogramRecipe *clone() override {
+ llvm_unreachable("cloning not supported");
+ }
+
+ VP_CLASSOF_IMPL(VPDef::VPHistogramSC);
+
+ // Produce a histogram operation with widened ingredients
+ void execute(VPTransformState &State) override;
+
+ unsigned getOpcode() { return Opcode; }
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+ /// Print the recipe
+ void print(raw_ostream &O, const Twine &Indent,
+ VPSlotTracker &SlotTracker) const override;
+#endif
+};
+
/// A recipe for widening select instructions.
struct VPWidenSelectRecipe : public VPSingleDefRecipe {
template <typename IterT>
diff --git a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
index a4a115037fa0d..a6c9e63d9c9e6 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanRecipes.cpp
@@ -21,6 +21,7 @@
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
@@ -868,6 +869,42 @@ void VPWidenCallRecipe::print(raw_ostream &O, const Twine &Indent,
O << ")";
}
}
+#endif
+
+void VPHistogramRecipe::execute(VPTransformState &State) {
+ assert(State.UF == 1 && "Tried interleaving histogram operation");
+ State.setDebugLocFrom(getDebugLoc());
+ IRBuilderBase &Builder = State.Builder;
+ Value *Address = State.get(getOperand(0), 0);
+ Value *IncVec = State.get(getOperand(1), 0);
+ Value *Mask = State.get(getOperand(2), 0);
+
+ // Not sure how to make IncAmt stay scalar yet. For now just extract the
+ // first element and tidy up later.
+ // FIXME: Do we actually want this to be scalar? We just splat it in the
+ // backend anyway...
+ Value *IncAmt = Builder.CreateExtractElement(IncVec, Builder.getInt64(0));
+
+ // If this is a subtract, we want to invert the increment amount. We may
+ // add a separate intrinsic in future, but for now we'll try this.
+ if (Opcode == Instruction::Sub)
+ IncAmt = Builder.CreateNeg(IncAmt);
+
+ State.Builder.CreateIntrinsic(Intrinsic::experimental_vector_histogram_add,
+ {Address->getType(), IncAmt->getType()},
+ {Address, IncAmt, Mask});
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+void VPHistogramRecipe::print(raw_ostream &O, const Twine &Indent,
+ VPSlotTracker &SlotTracker) const {
+ O << Indent << "WIDEN-HISTOGRAM buckets: ";
+ getOperand(0)->printAsOperand(O, SlotTracker);
+ O << ", inc: ";
+ getOperand(1)->printAsOperand(O, SlotTracker);
+ O << ", mask: ";
+ getOperand(2)->printAsOperand(O, SlotTracker);
+}
void VPWidenSelectRecipe::print(raw_ostream &O, const Twine &Indent,
VPSlotTracker &SlotTracker) const {
diff --git a/llvm/lib/Transforms/Vectorize/VPlanValue.h b/llvm/lib/Transforms/Vectorize/VPlanValue.h
index 8d945f6f2b8ea..88d6221928f9e 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanValue.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanValue.h
@@ -358,6 +358,7 @@ class VPDef {
VPWidenSC,
VPWidenSelectSC,
VPBlendSC,
+ VPHistogramSC,
// START: Phi-like recipes. Need to be kept together.
VPWidenPHISC,
VPPredInstPHISC,
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/histogram.ll b/llvm/test/Analysis/LoopAccessAnalysis/histogram.ll
index 32636c3878b92..a3cb1b9afc18d 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/histogram.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/histogram.ll
@@ -5,10 +5,9 @@
define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %N) {
; CHECK-LABEL: 'simple_histogram'
; CHECK-NEXT: for.body:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
+; CHECK-NEXT: Histogram:
; CHECK-NEXT: %1 = load i32, ptr %arrayidx2, align 4 ->
; CHECK-NEXT: store i32 %inc, ptr %arrayidx2, align 4
; CHECK-EMPTY:
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
index 283baf61b7393..ad2e11c38ed0a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
@@ -14,19 +14,48 @@ define void @simple_histogram(ptr noalias %buckets, ptr readonly %indices, i64 %
; CHECK-LABEL: define void @simple_histogram(
; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP10]], i32 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP12]] to i64
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP1]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[INC:%.*]] = add nsw i32 [[TMP13]], 1
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP3:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -54,19 +83,48 @@ define void @simple_histogram_sub(ptr noalias %buckets, ptr readonly %indices, i
; CHECK-LABEL: define void @simple_histogram_sub(
; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP1:%.*]] = mul i64 [[TMP0]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP1]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = add i64 [[INDEX]], 0
+; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[TMP6]]
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[TMP7]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP10]], i32 -1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP11:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP11]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
-; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
+; CHECK-NEXT: [[TMP12:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP12]] to i64
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
-; CHECK-NEXT: [[INC:%.*]] = sub nsw i32 [[TMP1]], 1
+; CHECK-NEXT: [[TMP13:%.*]] = load i32, ptr [[ARRAYIDX2]], align 4
+; CHECK-NEXT: [[INC:%.*]] = sub nsw i32 [[TMP13]], 1
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
@@ -94,14 +152,47 @@ define void @conditional_histogram(ptr noalias %buckets, ptr readonly %indices,
; CHECK-LABEL: define void @conditional_histogram(
; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], ptr readonly [[CONDS:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP7:%.*]] = mul i64 [[TMP6]], 4
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[N]], [[TMP7]]
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP3:%.*]] = mul i64 [[TMP2]], 4
+; CHECK-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N]], [[TMP3]]
+; CHECK-NEXT: [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
; CHECK-NEXT: br label [[FOR_BODY:%.*]]
-; CHECK: for.body:
-; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[IV_NEXT:%.*]], [[NEXT:%.*]] ]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[FOR_BODY]] ]
+; CHECK-NEXT: [[IV:%.*]] = add i64 [[INDEX]], 0
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV]]
-; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX]], align 4
+; CHECK-NEXT: [[TMP8:%.*]] = getelementptr inbounds i32, ptr [[ARRAYIDX]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <vscale x 4 x i32>, ptr [[TMP8]], align 4
+; CHECK-NEXT: [[TMP9:%.*]] = zext <vscale x 4 x i32> [[WIDE_LOAD]] to <vscale x 4 x i64>
+; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], <vscale x 4 x i64> [[TMP9]]
+; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds i32, ptr [[CONDS]], i64 [[IV]]
+; CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds i32, ptr [[TMP11]], i32 0
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <vscale x 4 x i32>, ptr [[TMP12]], align 4
+; CHECK-NEXT: [[TMP13:%.*]] = icmp sgt <vscale x 4 x i32> [[WIDE_LOAD1]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 5100, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
+; CHECK-NEXT: call void @llvm.experimental.vector.histogram.add.nxv4p0.i32(<vscale x 4 x ptr> [[TMP10]], i32 1, <vscale x 4 x i1> [[TMP13]])
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[TMP5]]
+; CHECK-NEXT: [[TMP14:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP14]], label [[MIDDLE_BLOCK:%.*]], label [[FOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_EXIT:%.*]], label [[SCALAR_PH]]
+; CHECK: scalar.ph:
+; CHECK-NEXT: [[BC_RESUME_VAL:%.*]] = phi i64 [ [[N_VEC]], [[MIDDLE_BLOCK]] ], [ 0, [[ENTRY:%.*]] ]
+; CHECK-NEXT: br label [[FOR_BODY1:%.*]]
+; CHECK: for.body:
+; CHECK-NEXT: [[IV1:%.*]] = phi i64 [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[IV_NEXT:%.*]], [[NEXT:%.*]] ]
+; CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i32, ptr [[INDICES]], i64 [[IV1]]
+; CHECK-NEXT: [[TMP0:%.*]] = load i32, ptr [[ARRAYIDX1]], align 4
; CHECK-NEXT: [[IDXPROM1:%.*]] = zext i32 [[TMP0]] to i64
; CHECK-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, ptr [[BUCKETS]], i64 [[IDXPROM1]]
-; CHECK-NEXT: [[CONDIDX:%.*]] = getelementptr inbounds i32, ptr [[CONDS]], i64 [[IV]]
+; CHECK-NEXT: [[CONDIDX:%.*]] = getelementptr inbounds i32, ptr [[CONDS]], i64 [[IV1]]
; CHECK-NEXT: [[CONDDATA:%.*]] = load i32, ptr [[CONDIDX]], align 4
; CHECK-NEXT: [[IFCOND:%.*]] = icmp sgt i32 [[CONDDATA]], 5100
; CHECK-NEXT: br i1 [[IFCOND]], label [[IFTRUE:%.*]], label [[NEXT]]
@@ -111,9 +202,9 @@ define void @conditional_histogram(ptr noalias %buckets, ptr readonly %indices,
; CHECK-NEXT: store i32 [[INC]], ptr [[ARRAYIDX2]], align 4
; CHECK-NEXT: br label [[NEXT]]
; CHECK: next:
-; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV1]], 1
; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[IV_NEXT]], [[N]]
-; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT:%.*]], label [[FOR_BODY]]
+; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_EXIT]], label [[FOR_BODY1]], !llvm.loop [[LOOP7:![0-9]+]]
; CHECK: for.exit:
; CHECK-NEXT: ret void
;
>From 3e7b720b28ba083e7a118ef057ea82f403216cac Mon Sep 17 00:00:00 2001
From: Graham Hunter <graham.hunter at arm.com>
Date: Tue, 25 Jun 2024 12:55:45 +0000
Subject: [PATCH 3/3] Improve comments
---
llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h | 5 +++--
llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll | 1 +
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
index 2e0139da4668a..d8c9465576807 100644
--- a/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
+++ b/llvm/lib/Transforms/Vectorize/VPRecipeBuilder.h
@@ -103,8 +103,9 @@ class VPRecipeBuilder {
VPBasicBlock *VPBB);
/// Makes Histogram count operations safe for vectorization, by emitting a
- /// Histogram LLVM Intrinsic before the BinOp (Add/Sub) that does the actual
- /// counting.
+ /// llvm.experimental.vector.histogram.add intrinsic in place of the
+ /// Load + Add|Sub + Store operations that perform the histogram in the
+ /// original scalar loop.
VPHistogramRecipe *tryToWidenHistogram(const HistogramInfo *HI,
ArrayRef<VPValue *> Operands);
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
index ad2e11c38ed0a..a5d4d009a7503 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve2-histcnt.ll
@@ -319,6 +319,7 @@ for.exit:
ret void
}
+;; We don't support histograms with an update value that's not loop-invariant.
define void @histogram_varying_increment(ptr noalias %buckets, ptr readonly %indices, ptr readonly %incvals, i64 %N) #0 {
; CHECK-LABEL: define void @histogram_varying_increment(
; CHECK-SAME: ptr noalias [[BUCKETS:%.*]], ptr readonly [[INDICES:%.*]], ptr readonly [[INCVALS:%.*]], i64 [[N:%.*]]) #[[ATTR0]] {