[llvm] c80fe25 - [ASAN] Support memory checks on vp.gather/scatter.
Yeting Kuo via llvm-commits
llvm-commits at lists.llvm.org
Fri May 26 00:28:47 PDT 2023
Author: Yeting Kuo
Date: 2023-05-26T15:28:40+08:00
New Revision: c80fe251d869d5304448cfa3bb855c653285de7f
URL: https://github.com/llvm/llvm-project/commit/c80fe251d869d5304448cfa3bb855c653285de7f
DIFF: https://github.com/llvm/llvm-project/commit/c80fe251d869d5304448cfa3bb855c653285de7f.diff
LOG: [ASAN] Support memory checks on vp.gather/scatter.
The patch supports vp.gather/scatter by allowing addresses to be vectors of pointers.
We then only need to check each active pointer element of those pointer vectors.
Reviewed By: reames
Differential Revision: https://reviews.llvm.org/D149245
Added:
Modified:
llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
llvm/test/Instrumentation/AddressSanitizer/asan-vp-load-store.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
index 59dbfb3aabefb..511ac37b1cd1e 100644
--- a/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/AddressSanitizer.cpp
@@ -1378,6 +1378,21 @@ void AddressSanitizer::getInterestingMemoryOperands(
Stride);
break;
}
+ case Intrinsic::vp_gather:
+ case Intrinsic::vp_scatter: {
+ auto *VPI = cast<VPIntrinsic>(CI);
+ unsigned IID = CI->getIntrinsicID();
+ bool IsWrite = IID == Intrinsic::vp_scatter;
+ if (IsWrite ? !ClInstrumentWrites : !ClInstrumentReads)
+ return;
+ unsigned PtrOpNo = *VPI->getMemoryPointerParamPos(IID);
+ Type *Ty = IsWrite ? CI->getArgOperand(0)->getType() : CI->getType();
+ MaybeAlign Alignment = VPI->getPointerAlignment();
+ Interesting.emplace_back(I, PtrOpNo, IsWrite, Ty, Alignment,
+ VPI->getMaskParam(),
+ VPI->getVectorLengthParam());
+ break;
+ }
default:
for (unsigned ArgNo = 0; ArgNo < CI->arg_size(); ArgNo++) {
if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
@@ -1521,7 +1536,12 @@ void AddressSanitizer::instrumentMaskedLoadOrStore(
}
Value *InstrumentedAddress;
- if (Stride) {
+ if (isa<VectorType>(Addr->getType())) {
+ assert(
+ cast<VectorType>(Addr->getType())->getElementType()->isPointerTy() &&
+ "Expected vector of pointer.");
+ InstrumentedAddress = IRB.CreateExtractElement(Addr, Index);
+ } else if (Stride) {
Index = IRB.CreateMul(Index, Stride);
Addr = IRB.CreateBitCast(Addr, Type::getInt8PtrTy(*C));
InstrumentedAddress = IRB.CreateGEP(Type::getInt8Ty(*C), Addr, {Index});
diff --git a/llvm/test/Instrumentation/AddressSanitizer/asan-vp-load-store.ll b/llvm/test/Instrumentation/AddressSanitizer/asan-vp-load-store.ll
index 8911f45888043..ee752c8c61da0 100644
--- a/llvm/test/Instrumentation/AddressSanitizer/asan-vp-load-store.ll
+++ b/llvm/test/Instrumentation/AddressSanitizer/asan-vp-load-store.ll
@@ -444,3 +444,81 @@ define <vscale x 4 x float> @scalable.strided.load.nxv4f32.align(ptr align 4 %p,
%res = tail call <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.i32(ptr %p, i32 4, <vscale x 4 x i1> %mask, i32 %evl)
ret <vscale x 4 x float> %res
}
+
+; Test vp gather and scatter.
+declare <vscale x 4 x float> @llvm.vp.gather.nxv4f32.v4p0(<vscale x 4 x ptr>, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.scatter.nxv4f32.v4p0(<vscale x 4 x float>, <vscale x 4 x ptr>, <vscale x 4 x i1>, i32)
+
+define <vscale x 4 x float> @scalable.gather.nxv4f32(<vscale x 4 x ptr> %vp, <vscale x 4 x i1> %mask, i32 %evl) sanitize_address {
+; CHECK-LABEL: @scalable.gather.nxv4f32(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[EVL:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP12:%.*]]
+; CHECK: 2:
+; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[EVL]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP5]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP2]] ], [ [[IV_NEXT:%.*]], [[TMP11:%.*]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <vscale x 4 x i1> [[MASK:%.*]], i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP11]]
+; CHECK: 8:
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <vscale x 4 x ptr> [[VP:%.*]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; CHECK-NEXT: call void @__asan_load4(i64 [[TMP10]])
+; CHECK-NEXT: br label [[TMP11]]
+; CHECK: 11:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP6]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP12]]
+; CHECK: 12:
+; CHECK-NEXT: [[RES:%.*]] = tail call <vscale x 4 x float> @llvm.vp.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> align 4 [[VP]], <vscale x 4 x i1> [[MASK]], i32 [[EVL]])
+; CHECK-NEXT: ret <vscale x 4 x float> [[RES]]
+;
+; DISABLED-LABEL: @scalable.gather.nxv4f32(
+; DISABLED-NEXT: [[RES:%.*]] = tail call <vscale x 4 x float> @llvm.vp.gather.nxv4f32.nxv4p0(<vscale x 4 x ptr> align 4 [[VP:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[EVL:%.*]])
+; DISABLED-NEXT: ret <vscale x 4 x float> [[RES]]
+;
+ %res = tail call <vscale x 4 x float> @llvm.vp.gather.nxv4f32.v4p0(<vscale x 4 x ptr> align 4 %vp, <vscale x 4 x i1> %mask, i32 %evl)
+ ret <vscale x 4 x float> %res
+}
+
+define void @scalable.scatter.nxv4f32(<vscale x 4 x float> %arg, <vscale x 4 x ptr> %vp, <vscale x 4 x i1> %mask, i32 %evl) sanitize_address {
+; CHECK-LABEL: @scalable.scatter.nxv4f32(
+; CHECK-NEXT: [[TMP1:%.*]] = icmp ne i32 [[EVL:%.*]], 0
+; CHECK-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP12:%.*]]
+; CHECK: 2:
+; CHECK-NEXT: [[TMP3:%.*]] = zext i32 [[EVL]] to i64
+; CHECK-NEXT: [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT: [[TMP5:%.*]] = mul i64 [[TMP4]], 4
+; CHECK-NEXT: [[TMP6:%.*]] = call i64 @llvm.umin.i64(i64 [[TMP3]], i64 [[TMP5]])
+; CHECK-NEXT: br label [[DOTSPLIT:%.*]]
+; CHECK: .split:
+; CHECK-NEXT: [[IV:%.*]] = phi i64 [ 0, [[TMP2]] ], [ [[IV_NEXT:%.*]], [[TMP11:%.*]] ]
+; CHECK-NEXT: [[TMP7:%.*]] = extractelement <vscale x 4 x i1> [[MASK:%.*]], i64 [[IV]]
+; CHECK-NEXT: br i1 [[TMP7]], label [[TMP8:%.*]], label [[TMP11]]
+; CHECK: 8:
+; CHECK-NEXT: [[TMP9:%.*]] = extractelement <vscale x 4 x ptr> [[VP:%.*]], i64 [[IV]]
+; CHECK-NEXT: [[TMP10:%.*]] = ptrtoint ptr [[TMP9]] to i64
+; CHECK-NEXT: call void @__asan_store4(i64 [[TMP10]])
+; CHECK-NEXT: br label [[TMP11]]
+; CHECK: 11:
+; CHECK-NEXT: [[IV_NEXT]] = add nuw nsw i64 [[IV]], 1
+; CHECK-NEXT: [[IV_CHECK:%.*]] = icmp eq i64 [[IV_NEXT]], [[TMP6]]
+; CHECK-NEXT: br i1 [[IV_CHECK]], label [[DOTSPLIT_SPLIT:%.*]], label [[DOTSPLIT]]
+; CHECK: .split.split:
+; CHECK-NEXT: br label [[TMP12]]
+; CHECK: 12:
+; CHECK-NEXT: tail call void @llvm.vp.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[ARG:%.*]], <vscale x 4 x ptr> align 4 [[VP]], <vscale x 4 x i1> [[MASK]], i32 [[EVL]])
+; CHECK-NEXT: ret void
+;
+; DISABLED-LABEL: @scalable.scatter.nxv4f32(
+; DISABLED-NEXT: tail call void @llvm.vp.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[ARG:%.*]], <vscale x 4 x ptr> align 4 [[VP:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i32 [[EVL:%.*]])
+; DISABLED-NEXT: ret void
+;
+ tail call void @llvm.vp.scatter.nxv4f32.v4p0(<vscale x 4 x float> %arg, <vscale x 4 x ptr> align 4 %vp, <vscale x 4 x i1> %mask, i32 %evl)
+ ret void
+}
More information about the llvm-commits
mailing list