[PATCH] D87538: [VectorCombine] Don't vectorize scalar load under asan/tsan
Fangrui Song via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Fri Sep 11 14:51:00 PDT 2020
MaskRay updated this revision to Diff 291341.
MaskRay retitled this revision from "[VectorCombine] Don't vectorize scalar load under tsan" to "[VectorCombine] Don't vectorize scalar load under asan/tsan".
MaskRay edited the summary of this revision.
MaskRay added a comment.
suppress asan as well
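To make the asan rationale concrete, here is a minimal C++ sketch (not part of
the patch; the function name and buffer are made up). It assumes a build with
-fsanitize=address; __asan_poison_memory_region is the real interface declared
in <sanitizer/asan_interface.h>.

  #include <sanitizer/asan_interface.h>
  #include <cstdint>

  alignas(16) int16_t buf[16];

  int16_t load_second_half_head() {
    // The program declares buf[9..15] off-limits.
    __asan_poison_memory_region(buf + 9, 7 * sizeof(int16_t));
    // A 2-byte scalar load of buf[8] is valid. If VectorCombine widened it
    // to a 16-byte vector load covering buf[8..15], asan would report a
    // spurious use-after-poison even though the extra lanes are never used.
    return buf[8];
  }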
Repository:
rG LLVM Github Monorepo
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D87538/new/
https://reviews.llvm.org/D87538
Files:
llvm/lib/Transforms/Vectorize/VectorCombine.cpp
llvm/test/Transforms/VectorCombine/X86/load.ll
Index: llvm/test/Transforms/VectorCombine/X86/load.ll
===================================================================
--- llvm/test/Transforms/VectorCombine/X86/load.ll
+++ llvm/test/Transforms/VectorCombine/X86/load.ll
@@ -292,6 +292,38 @@
ret <8 x i16> %r
}
+; Negative test - disable under asan because a widened load can cause spurious
+; use-after-poison reports when __asan_poison_memory_region is used.
+
+define <8 x i16> @gep10_load_i16_insert_v8i16_asan(<8 x i16>* align 16 dereferenceable(32) %p) sanitize_address {
+; CHECK-LABEL: @gep10_load_i16_insert_v8i16_asan(
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[P:%.*]], i64 1, i64 0
+; CHECK-NEXT: [[S:%.*]] = load i16, i16* [[GEP]], align 16
+; CHECK-NEXT: [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
+; CHECK-NEXT: ret <8 x i16> [[R]]
+;
+ %gep = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i64 1, i64 0
+ %s = load i16, i16* %gep, align 16
+ %r = insertelement <8 x i16> undef, i16 %s, i64 0
+ ret <8 x i16> %r
+}
+
+; Negative test - disable under tsan because a widened load may overlap bytes
+; being concurrently modified; tsan does not know that the extra bytes are unused.
+
+define <8 x i16> @gep10_load_i16_insert_v8i16_tsan(<8 x i16>* align 16 dereferenceable(32) %p) sanitize_thread {
+; CHECK-LABEL: @gep10_load_i16_insert_v8i16_tsan(
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds <8 x i16>, <8 x i16>* [[P:%.*]], i64 1, i64 0
+; CHECK-NEXT: [[S:%.*]] = load i16, i16* [[GEP]], align 16
+; CHECK-NEXT: [[R:%.*]] = insertelement <8 x i16> undef, i16 [[S]], i64 0
+; CHECK-NEXT: ret <8 x i16> [[R]]
+;
+ %gep = getelementptr inbounds <8 x i16>, <8 x i16>* %p, i64 1, i64 0
+ %s = load i16, i16* %gep, align 16
+ %r = insertelement <8 x i16> undef, i16 %s, i64 0
+ ret <8 x i16> %r
+}
+
; Negative test - can't safely load the offset vector, but could load+shuffle.
define <8 x i16> @gep10_load_i16_insert_v8i16_deref(<8 x i16>* align 16 dereferenceable(31) %p) {
Index: llvm/lib/Transforms/Vectorize/VectorCombine.cpp
===================================================================
--- llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -687,6 +687,11 @@
if (!TTI.getNumberOfRegisters(TTI.getRegisterClassForType(/*Vector*/ true)))
return false;
+  // Do not vectorize a scalar load under asan or tsan: the widened load may
+  // overlap bytes poisoned via __asan_poison_memory_region or bytes that are
+  // being concurrently modified, triggering spurious sanitizer reports.
+  bool CanWiden = !F.hasFnAttribute(Attribute::SanitizeAddress) &&
+                  !F.hasFnAttribute(Attribute::SanitizeThread);
bool MadeChange = false;
for (BasicBlock &BB : F) {
// Ignore unreachable basic blocks.
@@ -700,7 +705,8 @@
if (isa<DbgInfoIntrinsic>(I))
continue;
Builder.SetInsertPoint(&I);
-    MadeChange |= vectorizeLoadInsert(I);
+    if (CanWiden)
+      MadeChange |= vectorizeLoadInsert(I);
MadeChange |= foldExtractExtract(I);
MadeChange |= foldBitcastShuf(I);
MadeChange |= scalarizeBinopOrCmp(I);
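For the tsan side, a similar hypothetical sketch (assuming -fsanitize=thread):
the two threads touch disjoint elements, so the scalar program is race-free,
but a load widened to <8 x i16> would overlap the concurrent store and make
tsan report a race the source program does not have.

  #include <cstdint>
  #include <thread>

  alignas(16) int16_t a[8];

  int16_t reader() { return a[0]; }  // 2-byte load; race-free as written
  void writer() { a[4] = 1; }        // disjoint 2-byte store

  int main() {
    std::thread t(writer);
    int16_t v = reader();  // widening this to cover a[0..7] overlaps a[4]
    t.join();
    return v;
  }

This is also why CanWiden requires both sanitizer attributes to be absent:
either instrumentation mode on its own makes the widened load unsafe to
introduce.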