[llvm] 5eb9acf - [HWASAN] Instrument scalable load/store without crashing

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Mar 23 08:24:39 PDT 2023


Author: Philip Reames
Date: 2023-03-23T08:24:30-07:00
New Revision: 5eb9acf9be3cee01ea95448fa8b1e00e3c01868a

URL: https://github.com/llvm/llvm-project/commit/5eb9acf9be3cee01ea95448fa8b1e00e3c01868a
DIFF: https://github.com/llvm/llvm-project/commit/5eb9acf9be3cee01ea95448fa8b1e00e3c01868a.diff

LOG: [HWASAN] Instrument scalable load/store without crashing

We can simply push scalable accesses down the existing sized-callback slow path, with a minor change to how we compute the size argument.
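
For context, the key piece is IRBuilder::CreateTypeSize, which lowers a
TypeSize to a plain constant for fixed-width accesses and to a
"vscale * MinBits" expression for scalable ones. Below is a minimal sketch
of the resulting size computation; getAccessSizeInBytes is a hypothetical
helper name, but the IRBuilder calls are the ones the patch uses:

    // Sketch only: getAccessSizeInBytes is a hypothetical wrapper around
    // the exact IRBuilder calls added to instrumentMemAccess below.
    #include "llvm/IR/IRBuilder.h"
    using namespace llvm;

    static Value *getAccessSizeInBytes(IRBuilderBase &IRB, Type *IntptrTy,
                                       TypeSize StoreSizeInBits) {
      // Fixed-width access: CreateTypeSize folds to a ConstantInt, so the
      // udiv constant-folds and the callback sees a plain size (e.g. i64 32).
      // Scalable access: CreateTypeSize emits "mul (llvm.vscale), MinBits",
      // and the udiv by 8 survives as real IR, as in the new test file.
      return IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy, StoreSizeInBits),
                            ConstantInt::get(IntptrTy, 8));
    }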

Added: 
    llvm/test/Instrumentation/HWAddressSanitizer/vector-load-store.ll

Modified: 
    llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index ca498d08422f7..f98cb67481154 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -964,7 +964,7 @@ bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
     return false; // FIXME
 
   IRBuilder<> IRB(O.getInsn());
-  if (isPowerOf2_64(O.TypeStoreSize) &&
+  if (!O.TypeStoreSize.isScalable() && isPowerOf2_64(O.TypeStoreSize) &&
       (O.TypeStoreSize / 8 <= (1ULL << (kNumberOfAccessSizes - 1))) &&
       (!O.Alignment || *O.Alignment >= Mapping.getObjectAlignment() ||
        *O.Alignment >= O.TypeStoreSize / 8)) {
@@ -980,7 +980,9 @@ bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
   } else {
     IRB.CreateCall(HwasanMemoryAccessCallbackSized[O.IsWrite],
                    {IRB.CreatePointerCast(Addr, IntptrTy),
-                    ConstantInt::get(IntptrTy, O.TypeStoreSize / 8)});
+                    IRB.CreateUDiv(IRB.CreateTypeSize(IntptrTy,
+                                                      O.TypeStoreSize),
+                                   ConstantInt::get(IntptrTy, 8))});
   }
   untagPointerOperand(O.getInsn(), Addr);
 

diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/vector-load-store.ll b/llvm/test/Instrumentation/HWAddressSanitizer/vector-load-store.ll
new file mode 100644
index 0000000000000..5312c7cc7336d
--- /dev/null
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/vector-load-store.ll
@@ -0,0 +1,272 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=hwasan -S | FileCheck %s
+
+target triple = "aarch64--linux-android10000"
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+define void @load.v1i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.v1i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 2)
+; CHECK-NEXT:    [[TMP1:%.*]] = load <1 x i32>, ptr [[P]], align 4
+; CHECK-NEXT:    ret void
+;
+  load <1 x i32>, ptr %p
+  ret void
+}
+
+define void @load.v2i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.v2i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 3)
+; CHECK-NEXT:    [[TMP1:%.*]] = load <2 x i32>, ptr [[P]], align 8
+; CHECK-NEXT:    ret void
+;
+  load <2 x i32>, ptr %p
+  ret void
+}
+
+define void @load.v4i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.v4i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 4)
+; CHECK-NEXT:    [[TMP1:%.*]] = load <4 x i32>, ptr [[P]], align 16
+; CHECK-NEXT:    ret void
+;
+  load <4 x i32>, ptr %p
+  ret void
+}
+
+define void @load.v8i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.v8i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 32)
+; CHECK-NEXT:    [[TMP2:%.*]] = load <8 x i32>, ptr [[P]], align 32
+; CHECK-NEXT:    ret void
+;
+  load <8 x i32>, ptr %p
+  ret void
+}
+
+define void @load.v16i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.v16i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 64)
+; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i32>, ptr [[P]], align 64
+; CHECK-NEXT:    ret void
+;
+  load <16 x i32>, ptr %p
+  ret void
+}
+
+
+define void @store.v1i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.v1i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 18)
+; CHECK-NEXT:    store <1 x i32> zeroinitializer, ptr [[P]], align 4
+; CHECK-NEXT:    ret void
+;
+  store <1 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @store.v2i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.v2i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 19)
+; CHECK-NEXT:    store <2 x i32> zeroinitializer, ptr [[P]], align 8
+; CHECK-NEXT:    ret void
+;
+  store <2 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @store.v4i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.v4i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    call void @llvm.hwasan.check.memaccess.shortgranules(ptr [[DOTHWASAN_SHADOW]], ptr [[P:%.*]], i32 20)
+; CHECK-NEXT:    store <4 x i32> zeroinitializer, ptr [[P]], align 16
+; CHECK-NEXT:    ret void
+;
+  store <4 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @store.v8i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.v8i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 32)
+; CHECK-NEXT:    store <8 x i32> zeroinitializer, ptr [[P]], align 32
+; CHECK-NEXT:    ret void
+;
+  store <8 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @store.v16i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.v16i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 64)
+; CHECK-NEXT:    store <16 x i32> zeroinitializer, ptr [[P]], align 64
+; CHECK-NEXT:    ret void
+;
+  store <16 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+
+define void @load.nxv1i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.nxv1i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 32
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 1 x i32>, ptr [[P]], align 4
+; CHECK-NEXT:    ret void
+;
+  load <vscale x 1 x i32>, ptr %p
+  ret void
+}
+
+define void @load.nxv2i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.nxv2i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 64
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 2 x i32>, ptr [[P]], align 8
+; CHECK-NEXT:    ret void
+;
+  load <vscale x 2 x i32>, ptr %p
+  ret void
+}
+
+define void @load.nxv4i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.nxv4i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 128
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 4 x i32>, ptr [[P]], align 16
+; CHECK-NEXT:    ret void
+;
+  load <vscale x 4 x i32>, ptr %p
+  ret void
+}
+
+define void @load.nxv8i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.nxv8i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 256
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 8 x i32>, ptr [[P]], align 32
+; CHECK-NEXT:    ret void
+;
+  load <vscale x 8 x i32>, ptr %p
+  ret void
+}
+
+define void @load.nxv16i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @load.nxv16i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 512
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_loadN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i32>, ptr [[P]], align 64
+; CHECK-NEXT:    ret void
+;
+  load <vscale x 16 x i32>, ptr %p
+  ret void
+}
+
+
+define void @store.nxv1i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.nxv1i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 32
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    store <vscale x 1 x i32> zeroinitializer, ptr [[P]], align 4
+; CHECK-NEXT:    ret void
+;
+  store <vscale x 1 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @store.nxv2i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.nxv2i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 64
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    store <vscale x 2 x i32> zeroinitializer, ptr [[P]], align 8
+; CHECK-NEXT:    ret void
+;
+  store <vscale x 2 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @store.nxv4i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.nxv4i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 128
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    store <vscale x 4 x i32> zeroinitializer, ptr [[P]], align 16
+; CHECK-NEXT:    ret void
+;
+  store <vscale x 4 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @store.nxv8i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.nxv8i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 256
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    store <vscale x 8 x i32> zeroinitializer, ptr [[P]], align 32
+; CHECK-NEXT:    ret void
+;
+  store <vscale x 8 x i32> zeroinitializer, ptr %p
+  ret void
+}
+
+define void @store.nxv16i32(ptr %p) sanitize_hwaddress {
+; CHECK-LABEL: @store.nxv16i32(
+; CHECK-NEXT:    [[DOTHWASAN_SHADOW:%.*]] = call ptr asm "", "=r,0"(ptr @__hwasan_shadow)
+; CHECK-NEXT:    [[TMP1:%.*]] = ptrtoint ptr [[P:%.*]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
+; CHECK-NEXT:    [[TMP3:%.*]] = mul i64 [[TMP2]], 512
+; CHECK-NEXT:    [[TMP4:%.*]] = udiv i64 [[TMP3]], 8
+; CHECK-NEXT:    call void @__hwasan_storeN(i64 [[TMP1]], i64 [[TMP4]])
+; CHECK-NEXT:    store <vscale x 16 x i32> zeroinitializer, ptr [[P]], align 64
+; CHECK-NEXT:    ret void
+;
+  store <vscale x 16 x i32> zeroinitializer, ptr %p
+  ret void
+}
