[llvm] 39ae86a - [AArch64TTI] AArch64 supports NT vector stores through STNP.
Florian Hahn via llvm-commits
llvm-commits at lists.llvm.org
Wed Jan 22 17:13:55 PST 2020
Author: Florian Hahn
Date: 2020-01-22T16:45:24-08:00
New Revision: 39ae86ab72d706704ea70f85aa82c623cd99219d
URL: https://github.com/llvm/llvm-project/commit/39ae86ab72d706704ea70f85aa82c623cd99219d
DIFF: https://github.com/llvm/llvm-project/commit/39ae86ab72d706704ea70f85aa82c623cd99219d.diff
LOG: [AArch64TTI] AArch64 supports NT vector stores through STNP.
This patch adds a custom implementation of isLegalNTStore to AArch64TTI
that supports vector types that can be directly stored by STNP. Note
that the implementation may not catch all valid cases (e.g. a vector whose
size is a multiple of 256 bits could be broken down into multiple valid
256-bit stores), but it is good enough for LV to vectorize loops with NT
stores, as LV only passes in a 2-element vector to check. LV also seems to
be the only user of isLegalNTStore.
We should also do the same for NT loads, but before that we need to
ensure that we properly lower LDNP of vectors, similar to D72919.
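As a sketch (not part of this commit; the function name is illustrative), a
scalar loop with nontemporal stores like the one below is the kind of input
LV can now vectorize, and the resulting nontemporal vector store can then be
lowered by the backend to an STNP of the two halves of the vector:

  ; illustrative example, not part of the committed tests
  define void @nt_store_example(i64* %p, i64 %v) {
  entry:
    br label %loop

  loop:
    %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
    %addr = getelementptr inbounds i64, i64* %p, i64 %i
    store i64 %v, i64* %addr, align 8, !nontemporal !0
    %i.next = add nuw nsw i64 %i, 1
    %cmp = icmp ult i64 %i.next, 1024
    br i1 %cmp, label %loop, label %exit

  exit:
    ret void
  }

  !0 = !{i32 1}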
Reviewers: dmgreen, samparker, t.p.northover, ab
Reviewed By: dmgreen
Differential Revision: https://reviews.llvm.org/D73158
Added:
llvm/test/Transforms/LoopVectorize/AArch64/nontemporal-load-store.ll
Modified:
llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
index 6f4569a49783..b143b45e57ab 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.h
@@ -172,6 +172,24 @@ class AArch64TTIImpl : public BasicTTIImplBase<AArch64TTIImpl> {
return isLegalMaskedLoadStore(DataType, Alignment);
}
+ bool isLegalNTStore(Type *DataType, Align Alignment) {
+ // NOTE: The logic below is mostly geared towards LV, which calls it with
+ // vectors with 2 elements. We might want to improve that, if other
+ // users show up.
+ // Nontemporal vector stores can be directly lowered to STNP, if the vector
+ // can be halved so that each half fits into a register. That's the case if
+ // the element type fits into a register and the number of elements is a
+ // power of 2 > 1.
+ if (isa<VectorType>(DataType)) {
+ unsigned NumElements = DataType->getVectorNumElements();
+ unsigned EltSize =
+ DataType->getVectorElementType()->getScalarSizeInBits();
+ return NumElements > 1 && isPowerOf2_64(NumElements) && EltSize >= 8 &&
+ EltSize <= 128 && isPowerOf2_64(EltSize);
+ }
+ return BaseT::isLegalNTStore(DataType, Alignment);
+ }
+
int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
ArrayRef<unsigned> Indices, unsigned Alignment,
unsigned AddressSpace,
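For illustration, here is how the check above evaluates for a few vector
types (a worked sketch applying the expression, not additional test output):

  ; <2 x i64>: NumElements = 2 (power of 2 > 1), EltSize = 64 -> legal,
  ;            each 64-bit half fits into a register
  ; <4 x i32>: NumElements = 4, EltSize = 32                  -> legal
  ; <2 x i33>: EltSize = 33 is not a power of 2               -> not legal
  ; <1 x i64>: NumElements = 1, cannot be halved              -> not legal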
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/nontemporal-load-store.ll b/llvm/test/Transforms/LoopVectorize/AArch64/nontemporal-load-store.ll
new file mode 100644
index 000000000000..a9c1f6c5cd1d
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/nontemporal-load-store.ll
@@ -0,0 +1,259 @@
+; RUN: opt -loop-vectorize -mtriple=arm64-apple-iphones -force-vector-width=4 -force-vector-interleave=1 %s -S | FileCheck %s
+
+; Vectors with i4 elements may not be legal with nontemporal stores.
+define void @test_i4_store(i4* %ddst) {
+; CHECK-LABEL: define void @test_i4_store(
+; CHECK-NOT: vector.body:
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi i4* [ %ddst, %entry ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i4, i4* %ddst.addr, i64 1
+ store i4 10, i4* %ddst.addr, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 4
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_i8_store(i8* %ddst) {
+; CHECK-LABEL: define void @test_i8_store(
+; CHECK-LABEL: vector.body:
+; CHECK: store <4 x i8> {{.*}} !nontemporal !0
+; CHECK: br
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi i8* [ %ddst, %entry ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i8, i8* %ddst.addr, i64 1
+ store i8 10, i8* %ddst.addr, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 4
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_half_store(half* %ddst) {
+; CHECK-LABEL: define void @test_half_store(
+; CHECK-LABEL: vector.body:
+; CHECK: store <4 x half> {{.*}} !nontemporal !0
+; CHECK: br
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi half* [ %ddst, %entry ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds half, half* %ddst.addr, i64 1
+ store half 10.0, half* %ddst.addr, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 4
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_i16_store(i16* %ddst) {
+; CHECK-LABEL: define void @test_i16_store(
+; CHECK-LABEL: vector.body:
+; CHECK: store <4 x i16> {{.*}} !nontemporal !0
+; CHECK: br
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi i16* [ %ddst, %entry ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i16, i16* %ddst.addr, i64 1
+ store i16 10, i16* %ddst.addr, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 4
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_i32_store(i32* nocapture %ddst) {
+; CHECK-LABEL: define void @test_i32_store(
+; CHECK-LABEL: vector.body:
+; CHECK: store <16 x i32> {{.*}} !nontemporal !0
+; CHECK: br
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi i32* [ %ddst, %entry ], [ %incdec.ptr3, %for.body ]
+ %incdec.ptr = getelementptr inbounds i32, i32* %ddst.addr, i64 1
+ store i32 10, i32* %ddst.addr, align 4, !nontemporal !8
+ %incdec.ptr1 = getelementptr inbounds i32, i32* %ddst.addr, i64 2
+ store i32 20, i32* %incdec.ptr, align 4, !nontemporal !8
+ %incdec.ptr2 = getelementptr inbounds i32, i32* %ddst.addr, i64 3
+ store i32 30, i32* %incdec.ptr1, align 4, !nontemporal !8
+ %incdec.ptr3 = getelementptr inbounds i32, i32* %ddst.addr, i64 4
+ store i32 40, i32* %incdec.ptr2, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 4
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_i33_store(i33* nocapture %ddst) {
+; CHECK-LABEL: define void @test_i33_store(
+; CHECK-NOT: vector.body:
+; CHECK: ret
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi i33* [ %ddst, %entry ], [ %incdec.ptr3, %for.body ]
+ %incdec.ptr = getelementptr inbounds i33, i33* %ddst.addr, i64 1
+ store i33 10, i33* %ddst.addr, align 4, !nontemporal !8
+ %incdec.ptr1 = getelementptr inbounds i33, i33* %ddst.addr, i64 2
+ store i33 20, i33* %incdec.ptr, align 4, !nontemporal !8
+ %incdec.ptr2 = getelementptr inbounds i33, i33* %ddst.addr, i64 3
+ store i33 30, i33* %incdec.ptr1, align 4, !nontemporal !8
+ %incdec.ptr3 = getelementptr inbounds i33, i33* %ddst.addr, i64 4
+ store i33 40, i33* %incdec.ptr2, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 3
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_i40_store(i40* nocapture %ddst) {
+; CHECK-LABEL: define void @test_i40_store(
+; CHECK-NOT: vector.body:
+; CHECK: ret
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi i40* [ %ddst, %entry ], [ %incdec.ptr3, %for.body ]
+ %incdec.ptr = getelementptr inbounds i40, i40* %ddst.addr, i64 1
+ store i40 10, i40* %ddst.addr, align 4, !nontemporal !8
+ %incdec.ptr1 = getelementptr inbounds i40, i40* %ddst.addr, i64 2
+ store i40 20, i40* %incdec.ptr, align 4, !nontemporal !8
+ %incdec.ptr2 = getelementptr inbounds i40, i40* %ddst.addr, i64 3
+ store i40 30, i40* %incdec.ptr1, align 4, !nontemporal !8
+ %incdec.ptr3 = getelementptr inbounds i40, i40* %ddst.addr, i64 4
+ store i40 40, i40* %incdec.ptr2, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 3
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_i64_store(i64* nocapture %ddst) local_unnamed_addr {
+; CHECK-LABEL: define void @test_i64_store(
+; CHECK-LABEL: vector.body:
+; CHECK: store <4 x i64> {{.*}} !nontemporal !0
+; CHECK: br
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi i64* [ %ddst, %entry ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i64, i64* %ddst.addr, i64 1
+ store i64 10, i64* %ddst.addr, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 4
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_double_store(double* %ddst) {
+; CHECK-LABEL: define void @test_double_store(
+; CHECK-LABEL: vector.body:
+; CHECK: store <4 x double> {{.*}} !nontemporal !0
+; CHECK: br
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi double* [ %ddst, %entry ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds double, double* %ddst.addr, i64 1
+ store double 10.0, double* %ddst.addr, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 4
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_i128_store(i128* %ddst) {
+; CHECK-LABEL: define void @test_i128_store(
+; CHECK-LABEL: vector.body:
+; CHECK: store <4 x i128> {{.*}} !nontemporal !0
+; CHECK: br
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi i128* [ %ddst, %entry ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i128, i128* %ddst.addr, i64 1
+ store i128 10, i128* %ddst.addr, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 4
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+define void @test_i256_store(i256* %ddst) {
+; CHECK-LABEL: define void @test_i256_store(
+; CHECK-NOT: vector.body:
+; CHECK: ret void
+;
+entry:
+ br label %for.body
+
+for.body: ; preds = %entry, %for.body
+ %i = phi i32 [ 0, %entry ], [ %add, %for.body ]
+ %ddst.addr = phi i256* [ %ddst, %entry ], [ %incdec.ptr, %for.body ]
+ %incdec.ptr = getelementptr inbounds i256, i256* %ddst.addr, i64 1
+ store i256 10, i256* %ddst.addr, align 4, !nontemporal !8
+ %add = add nuw nsw i32 %i, 4
+ %cmp = icmp ult i32 %i, 4092
+ br i1 %cmp, label %for.body, label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.body
+ ret void
+}
+
+!8 = !{i32 1}