[llvm] 365bddf - [hwasan] Add intrinsics for fixed shadow on Aarch64 (#89319)

via llvm-commits llvm-commits at lists.llvm.org
Mon Apr 22 16:50:24 PDT 2024


Author: Thurston Dang
Date: 2024-04-22T16:50:19-07:00
New Revision: 365bddf634993d5ea357e9715d8aacd7ee40c4b5

URL: https://github.com/llvm/llvm-project/commit/365bddf634993d5ea357e9715d8aacd7ee40c4b5
DIFF: https://github.com/llvm/llvm-project/commit/365bddf634993d5ea357e9715d8aacd7ee40c4b5.diff

LOG: [hwasan] Add intrinsics for fixed shadow on Aarch64 (#89319)

This patch introduces HWASan memaccess intrinsics that assume a fixed
shadow (with the offset provided by --hwasan-mapping-offset=...), with
and without short granule support.

The behavior of HWASan is not meaningfully changed by this patch;
future work ("Optimize outlined memaccess for
fixed shadow on Aarch64": https://github.com/llvm/llvm-project/pull/88544) will make HWASan use these intrinsics.

We currently only support lowering the LLVM IR intrinsic to AArch64.

The test case is adapted from hwasan-check-memaccess.ll.

Added: 
    

Modified: 
    llvm/include/llvm/IR/Intrinsics.td
    llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
    llvm/lib/Target/AArch64/AArch64InstrInfo.td
    llvm/test/CodeGen/AArch64/hwasan-check-memaccess-fixedshadow.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 11262f265100c8..1d20f7e1b19854 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -2363,13 +2363,34 @@ def int_load_relative: DefaultAttrsIntrinsic<[llvm_ptr_ty], [llvm_ptr_ty, llvm_a
 def int_asan_check_memaccess :
   Intrinsic<[],[llvm_ptr_ty, llvm_i32_ty], [ImmArg<ArgIndex<1>>]>;
 
+// HWASan intrinsics to test whether a pointer is addressable.
+//===----------------------------------------------------------------------===//
+//
+// Variant 1) is the OG memaccess intrinsic
+// Parameters: Shadow base (passed in a register), pointer to be checked for
+// validity, AccessInfo (AccessInfo is defined in HWAddressSanitizer.h)
 def int_hwasan_check_memaccess :
   Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
             [ImmArg<ArgIndex<2>>]>;
+
+// Variant 2) supports short granule checks
+// Parameters: same as Variant 1
 def int_hwasan_check_memaccess_shortgranules :
   Intrinsic<[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
             [ImmArg<ArgIndex<2>>]>;
 
+// Variant 3) assumes a fixed shadow offset
+// Parameters: Pointer to be checked for validity, AccessInfo, Shadow base
+def int_hwasan_check_memaccess_fixedshadow :
+  Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i64_ty],
+            [ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
+// Variant 4) supports short granule checks and assumes a fixed shadow offset
+// Parameters: same as Variant 3
+def int_hwasan_check_memaccess_shortgranules_fixedshadow :
+  Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty, llvm_i64_ty],
+            [ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
+
 // Xray intrinsics
 //===----------------------------------------------------------------------===//
 // Custom event logging for x-ray.

diff --git a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
index f6ccd0ecfdc893..ee39c6355c2980 100644
--- a/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
+++ b/llvm/lib/Target/AArch64/AArch64AsmPrinter.cpp
@@ -116,7 +116,8 @@ class AArch64AsmPrinter : public AsmPrinter {
   void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
   void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
 
-  typedef std::tuple<unsigned, bool, uint32_t> HwasanMemaccessTuple;
+  typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
+      HwasanMemaccessTuple;
   std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
   void LowerKCFI_CHECK(const MachineInstr &MI);
   void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
@@ -551,10 +552,18 @@ void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
 void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
   Register Reg = MI.getOperand(0).getReg();
   bool IsShort =
-      MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES;
+      ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
+       (MI.getOpcode() ==
+        AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
   uint32_t AccessInfo = MI.getOperand(1).getImm();
-  MCSymbol *&Sym =
-      HwasanMemaccessSymbols[HwasanMemaccessTuple(Reg, IsShort, AccessInfo)];
+  bool IsFixedShadow =
+      ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
+       (MI.getOpcode() ==
+        AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
+  uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
+
+  MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
+      Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
   if (!Sym) {
     // FIXME: Make this work on non-ELF.
     if (!TM.getTargetTriple().isOSBinFormatELF())
@@ -562,6 +571,8 @@ void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
 
     std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
                           utostr(AccessInfo);
+    if (IsFixedShadow)
+      SymName += "_fixed_" + utostr(FixedShadowOffset);
     if (IsShort)
       SymName += "_short_v2";
     Sym = OutContext.getOrCreateSymbol(SymName);
@@ -596,6 +607,8 @@ void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
     unsigned Reg = std::get<0>(P.first);
     bool IsShort = std::get<1>(P.first);
     uint32_t AccessInfo = std::get<2>(P.first);
+    bool IsFixedShadow = std::get<3>(P.first);
+    uint64_t FixedShadowOffset = std::get<4>(P.first);
     const MCSymbolRefExpr *HwasanTagMismatchRef =
         IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
     MCSymbol *Sym = P.second;
@@ -625,14 +638,35 @@ void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
                                      .addImm(4)
                                      .addImm(55),
                                  *STI);
-    OutStreamer->emitInstruction(
-        MCInstBuilder(AArch64::LDRBBroX)
-            .addReg(AArch64::W16)
-            .addReg(IsShort ? AArch64::X20 : AArch64::X9)
-            .addReg(AArch64::X16)
-            .addImm(0)
-            .addImm(0),
-        *STI);
+
+    if (IsFixedShadow) {
+      // Aarch64 makes it difficult to embed large constants in the code.
+      // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
+      // left-shift option in the MOV instruction. Combined with the 16-bit
+      // immediate, this is enough to represent any offset up to 2**48.
+      OutStreamer->emitInstruction(MCInstBuilder(AArch64::MOVZXi)
+                                       .addReg(AArch64::X17)
+                                       .addImm(FixedShadowOffset >> 32)
+                                       .addImm(32),
+                                   *STI);
+      OutStreamer->emitInstruction(MCInstBuilder(AArch64::LDRBBroX)
+                                       .addReg(AArch64::W16)
+                                       .addReg(AArch64::X17)
+                                       .addReg(AArch64::X16)
+                                       .addImm(0)
+                                       .addImm(0),
+                                   *STI);
+    } else {
+      OutStreamer->emitInstruction(
+          MCInstBuilder(AArch64::LDRBBroX)
+              .addReg(AArch64::W16)
+              .addReg(IsShort ? AArch64::X20 : AArch64::X9)
+              .addReg(AArch64::X16)
+              .addImm(0)
+              .addImm(0),
+          *STI);
+    }
+
     OutStreamer->emitInstruction(
         MCInstBuilder(AArch64::SUBSXrs)
             .addReg(AArch64::XZR)
@@ -1765,6 +1799,8 @@ void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
 
   case AArch64::HWASAN_CHECK_MEMACCESS:
   case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
+  case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
+  case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
     LowerHWASAN_CHECK_MEMACCESS(*MI);
     return;
 

diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index be53be782077b7..ebddbefeb94f94 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -1818,6 +1818,20 @@ def HWASAN_CHECK_MEMACCESS_SHORTGRANULES : Pseudo<
   Sched<[]>;
 }
 
+let Defs = [ X16, X17, LR, NZCV ] in {
+def HWASAN_CHECK_MEMACCESS_FIXEDSHADOW : Pseudo<
+  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo, i64imm:$fixed_shadow),
+  [(int_hwasan_check_memaccess_fixedshadow GPR64noip:$ptr, (i32 timm:$accessinfo), (i64 timm:$fixed_shadow))]>,
+  Sched<[]>;
+}
+
+let Defs = [ X16, X17, LR, NZCV ] in {
+def HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW : Pseudo<
+  (outs), (ins GPR64noip:$ptr, i32imm:$accessinfo, i64imm:$fixed_shadow),
+  [(int_hwasan_check_memaccess_shortgranules_fixedshadow GPR64noip:$ptr, (i32 timm:$accessinfo), (i64 timm:$fixed_shadow))]>,
+  Sched<[]>;
+}
+
 // The virtual cycle counter register is CNTVCT_EL0.
 def : Pat<(readcyclecounter), (MRS 0xdf02)>;
 

diff --git a/llvm/test/CodeGen/AArch64/hwasan-check-memaccess-fixedshadow.ll b/llvm/test/CodeGen/AArch64/hwasan-check-memaccess-fixedshadow.ll
index e0be883b72c65a..2bce693c7bd837 100644
--- a/llvm/test/CodeGen/AArch64/hwasan-check-memaccess-fixedshadow.ll
+++ b/llvm/test/CodeGen/AArch64/hwasan-check-memaccess-fixedshadow.ll
@@ -1,5 +1,5 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
-; RUN: llc --hwasan-mapping-offset=4398046511104 < %s | FileCheck %s
+; RUN: llc < %s | FileCheck %s
 
 target triple = "aarch64--linux-android"
 
@@ -9,27 +9,24 @@ define ptr @f1(ptr %x0, ptr %x1) {
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov x9, x0
+; CHECK-NEXT:    bl __hwasan_check_x1_1_fixed_4398046511104
 ; CHECK-NEXT:    mov x0, x1
-; CHECK-NEXT:    bl __hwasan_check_x1_1
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  call void @llvm.hwasan.check.memaccess(ptr %x0, ptr %x1, i32 1)
+  call void @llvm.hwasan.check.memaccess.fixedshadow(ptr %x1, i32 1, i64 4398046511104)
   ret ptr %x1
 }
 
 define ptr @f2(ptr %x0, ptr %x1) {
 ; CHECK-LABEL: f2:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    stp x30, x20, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w20, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov x20, x1
-; CHECK-NEXT:    bl __hwasan_check_x0_2_short_v2
-; CHECK-NEXT:    ldp x30, x20, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    bl __hwasan_check_x0_2_fixed_4398046511104_short_v2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  call void @llvm.hwasan.check.memaccess.shortgranules(ptr %x1, ptr %x0, i32 2)
+  call void @llvm.hwasan.check.memaccess.shortgranules.fixedshadow(ptr %x0, i32 2, i64 4398046511104)
   ret ptr %x0
 }
 
@@ -40,11 +37,10 @@ define void @f3(ptr %x0, ptr %x1) {
 ; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov x9, x0
-; CHECK-NEXT:    bl __hwasan_check_x1_67043328
+; CHECK-NEXT:    bl __hwasan_check_x1_67043328_fixed_4398046511104
 ; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  call void @llvm.hwasan.check.memaccess(ptr %x0, ptr %x1, i32 67043328)
+  call void @llvm.hwasan.check.memaccess.fixedshadow(ptr %x1, i32 67043328, i64 4398046511104)
   ret void
 }
 
@@ -52,28 +48,27 @@ define void @f4(ptr %x0, ptr %x1) {
   ; 0x1000010 (access-size-index = 0, is-write = 1, match-all = 0x0)
 ; CHECK-LABEL: f4:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    stp x30, x20, [sp, #-16]! // 16-byte Folded Spill
+; CHECK-NEXT:    str x30, [sp, #-16]! // 8-byte Folded Spill
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    .cfi_offset w20, -8
 ; CHECK-NEXT:    .cfi_offset w30, -16
-; CHECK-NEXT:    mov x20, x0
-; CHECK-NEXT:    bl __hwasan_check_x1_16777232_short_v2
-; CHECK-NEXT:    ldp x30, x20, [sp], #16 // 16-byte Folded Reload
+; CHECK-NEXT:    bl __hwasan_check_x1_16777232_fixed_4398046511104_short_v2
+; CHECK-NEXT:    ldr x30, [sp], #16 // 8-byte Folded Reload
 ; CHECK-NEXT:    ret
-  call void @llvm.hwasan.check.memaccess.shortgranules(ptr %x0, ptr %x1, i32 16777232)
+  call void @llvm.hwasan.check.memaccess.shortgranules.fixedshadow(ptr %x1, i32 16777232, i64 4398046511104)
   ret void
 }
 
-declare void @llvm.hwasan.check.memaccess(ptr, ptr, i32)
-declare void @llvm.hwasan.check.memaccess.shortgranules(ptr, ptr, i32)
+declare void @llvm.hwasan.check.memaccess.fixedshadow(ptr, i32, i64)
+declare void @llvm.hwasan.check.memaccess.shortgranules.fixedshadow(ptr, i32, i64)
 
-; CHECK:      .section .text.hot,"axG",@progbits,__hwasan_check_x0_2_short_v2,comdat
-; CHECK-NEXT: .type __hwasan_check_x0_2_short_v2,@function
-; CHECK-NEXT: .weak __hwasan_check_x0_2_short_v2
-; CHECK-NEXT: .hidden __hwasan_check_x0_2_short_v2
-; CHECK-NEXT: __hwasan_check_x0_2_short_v2:
+; CHECK:      .section .text.hot,"axG",@progbits,__hwasan_check_x0_2_fixed_4398046511104_short_v2,comdat
+; CHECK-NEXT: .type __hwasan_check_x0_2_fixed_4398046511104_short_v2,@function
+; CHECK-NEXT: .weak __hwasan_check_x0_2_fixed_4398046511104_short_v2
+; CHECK-NEXT: .hidden __hwasan_check_x0_2_fixed_4398046511104_short_v2
+; CHECK-NEXT: __hwasan_check_x0_2_fixed_4398046511104_short_v2:
 ; CHECK-NEXT: sbfx x16, x0, #4, #52
-; CHECK-NEXT: ldrb w16, [x20, x16]
+; CHECK-NEXT: mov x17, #4398046511104
+; CHECK-NEXT: ldrb w16, [x17, x16]
 ; CHECK-NEXT: cmp x16, x0, lsr #56
 ; CHECK-NEXT: b.ne .Ltmp0
 ; CHECK-NEXT: .Ltmp1:
@@ -98,13 +93,14 @@ declare void @llvm.hwasan.check.memaccess.shortgranules(ptr, ptr, i32)
 ; CHECK-NEXT: br  x16
 
 
-; CHECK:      .section .text.hot,"axG",@progbits,__hwasan_check_x1_1,comdat
-; CHECK-NEXT: .type __hwasan_check_x1_1,@function
-; CHECK-NEXT: .weak __hwasan_check_x1_1
-; CHECK-NEXT: .hidden __hwasan_check_x1_1
-; CHECK-NEXT: __hwasan_check_x1_1:
+; CHECK:      .section .text.hot,"axG",@progbits,__hwasan_check_x1_1_fixed_4398046511104,comdat
+; CHECK-NEXT: .type __hwasan_check_x1_1_fixed_4398046511104,@function
+; CHECK-NEXT: .weak __hwasan_check_x1_1_fixed_4398046511104
+; CHECK-NEXT: .hidden __hwasan_check_x1_1_fixed_4398046511104
+; CHECK-NEXT: __hwasan_check_x1_1_fixed_4398046511104:
 ; CHECK-NEXT: sbfx x16, x1, #4, #52
-; CHECK-NEXT: ldrb w16, [x9, x16]
+; CHECK-NEXT: mov x17, #4398046511104
+; CHECK-NEXT: ldrb w16, [x17, x16]
 ; CHECK-NEXT: cmp x16, x1, lsr #56
 ; CHECK-NEXT: b.ne .Ltmp3
 ; CHECK-NEXT: .Ltmp4:
@@ -118,9 +114,10 @@ declare void @llvm.hwasan.check.memaccess.shortgranules(ptr, ptr, i32)
 ; CHECK-NEXT: ldr x16, [x16, :got_lo12:__hwasan_tag_mismatch]
 ; CHECK-NEXT: br  x16
 
-; CHECK:      __hwasan_check_x1_67043328:
+; CHECK:      __hwasan_check_x1_67043328_fixed_4398046511104:
 ; CHECK-NEXT: sbfx x16, x1, #4, #52
-; CHECK-NEXT: ldrb w16, [x9, x16]
+; CHECK-NEXT: mov x17, #4398046511104
+; CHECK-NEXT: ldrb w16, [x17, x16]
 ; CHECK-NEXT: cmp x16, x1, lsr #56
 ; CHECK-NEXT: b.ne .Ltmp5
 ; CHECK-NEXT: .Ltmp6:
@@ -135,9 +132,10 @@ declare void @llvm.hwasan.check.memaccess.shortgranules(ptr, ptr, i32)
 ; CHECK-NEXT: mov x1, #0
 ; CHECK-NEXT: b __hwasan_tag_mismatch
 
-; CHECK:      __hwasan_check_x1_16777232_short_v2:
+; CHECK:      __hwasan_check_x1_16777232_fixed_4398046511104_short_v2:
 ; CHECK-NEXT: sbfx	x16, x1, #4, #52
-; CHECK-NEXT: ldrb	w16, [x20, x16]
+; CHECK-NEXT: mov x17, #4398046511104
+; CHECK-NEXT: ldrb w16, [x17, x16]
 ; CHECK-NEXT: cmp	x16, x1, lsr #56
 ; CHECK-NEXT: b.ne	.Ltmp7
 ; CHECK-NEXT: .Ltmp8:


        


More information about the llvm-commits mailing list