[llvm] 2535fe5 - MTE: add more unchecked instructions.

Evgenii Stepanov via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 19 11:20:10 PST 2019


Author: Evgenii Stepanov
Date: 2019-11-19T11:19:53-08:00
New Revision: 2535fe5ad3327c8f77654a728986ca0afdf249f7

URL: https://github.com/llvm/llvm-project/commit/2535fe5ad3327c8f77654a728986ca0afdf249f7
DIFF: https://github.com/llvm/llvm-project/commit/2535fe5ad3327c8f77654a728986ca0afdf249f7.diff

LOG: MTE: add more unchecked instructions.

Summary:
In particular, 1- and 2-byte loads and stores ignore the pointer tag
when using SP as the base register.

Reviewers: pcc, ostannard

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D70341

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
    llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
index 3cc556f74aea..73bd434ef123 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
@@ -97,23 +97,49 @@ FunctionPass *llvm::createAArch64StackTaggingPreRAPass() {
 
 static bool isUncheckedLoadOrStoreOpcode(unsigned Opcode) {
   switch (Opcode) {
+  case AArch64::LDRBBui:
+  case AArch64::LDRHHui:
   case AArch64::LDRWui:
-  case AArch64::LDRSHWui:
   case AArch64::LDRXui:
+
   case AArch64::LDRBui:
-  case AArch64::LDRBBui:
   case AArch64::LDRHui:
   case AArch64::LDRSui:
   case AArch64::LDRDui:
   case AArch64::LDRQui:
+
+  case AArch64::LDRSHWui:
+  case AArch64::LDRSHXui:
+
+  case AArch64::LDRSBWui:
+  case AArch64::LDRSBXui:
+
+  case AArch64::LDRSWui:
+
+  case AArch64::STRBBui:
+  case AArch64::STRHHui:
   case AArch64::STRWui:
   case AArch64::STRXui:
+
   case AArch64::STRBui:
-  case AArch64::STRBBui:
   case AArch64::STRHui:
   case AArch64::STRSui:
   case AArch64::STRDui:
   case AArch64::STRQui:
+
+  case AArch64::LDPWi:
+  case AArch64::LDPXi:
+  case AArch64::LDPSi:
+  case AArch64::LDPDi:
+  case AArch64::LDPQi:
+
+  case AArch64::LDPSWi:
+
+  case AArch64::STPWi:
+  case AArch64::STPXi:
+  case AArch64::STPSi:
+  case AArch64::STPDi:
+  case AArch64::STPQi:
     return true;
   default:
     return false;

diff --git a/llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll b/llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll
index 095bedf494a2..200837dabfe0 100644
--- a/llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll
+++ b/llvm/test/CodeGen/AArch64/stack-tagging-unchecked-ld-st.ll
@@ -3,12 +3,32 @@
 ; RUN: llc < %s -mtriple=aarch64 -mattr=+mte -stack-tagging-unchecked-ld-st=always | FileCheck %s --check-prefixes=ALWAYS,COMMON
 
 declare void @use8(i8*)
+declare void @use16(i16*)
 declare void @use32(i32*)
+declare void @use64(i64*)
 declare void @use2x64([2 x i64]*)
 declare void @llvm.lifetime.start.p0i8(i64, i8* nocapture)
 declare void @llvm.lifetime.end.p0i8(i64, i8* nocapture)
 
-define i32 @CallLd() sanitize_memtag {
+define i64 @CallLd64() sanitize_memtag {
+entry:
+  %x = alloca i64, align 4
+  call void @use64(i64* %x)
+  %a = load i64, i64* %x
+  ret i64 %a
+}
+
+; COMMON:  CallLd64:
+; COMMON:  bl  use64
+
+; ALWAYS:  ldr x0, [sp]
+; DEFAULT: ldr x0, [sp]
+; NEVER:   ldr x0, [x{{.*}}]
+
+; COMMON:  ret
+
+
+define i32 @CallLd32() sanitize_memtag {
 entry:
   %x = alloca i32, align 4
   call void @use32(i32* %x)
@@ -16,7 +36,7 @@ entry:
   ret i32 %a
 }
 
-; COMMON:  CallLd:
+; COMMON:  CallLd32:
 ; COMMON:  bl  use32
 
 ; ALWAYS:  ldr w0, [sp]
@@ -25,7 +45,64 @@ entry:
 
 ; COMMON:  ret
 
-define void @CallStCall() sanitize_memtag {
+
+define i16 @CallLd16() sanitize_memtag {
+entry:
+  %x = alloca i16, align 4
+  call void @use16(i16* %x)
+  %a = load i16, i16* %x
+  ret i16 %a
+}
+
+; COMMON:  CallLd16:
+; COMMON:  bl  use16
+
+; ALWAYS:  ldrh w0, [sp]
+; DEFAULT: ldrh w0, [sp]
+; NEVER:   ldrh w0, [x{{.*}}]
+
+; COMMON:  ret
+
+
+define i8 @CallLd8() sanitize_memtag {
+entry:
+  %x = alloca i8, align 4
+  call void @use8(i8* %x)
+  %a = load i8, i8* %x
+  ret i8 %a
+}
+
+; COMMON:  CallLd8:
+; COMMON:  bl  use8
+
+; ALWAYS:  ldrb w0, [sp]
+; DEFAULT: ldrb w0, [sp]
+; NEVER:   ldrb w0, [x{{.*}}]
+
+; COMMON:  ret
+
+
+define void @CallSt64Call() sanitize_memtag {
+entry:
+  %x = alloca i64, align 4
+  call void @use64(i64* %x)
+  store i64 42, i64* %x
+  call void @use64(i64* %x)
+  ret void
+}
+
+; COMMON:  CallSt64Call:
+; COMMON:  bl  use64
+
+; ALWAYS:  str x{{.*}}, [sp]
+; DEFAULT: str x{{.*}}, [sp]
+; NEVER:   str x{{.*}}, [x{{.*}}]
+
+; COMMON:  bl  use64
+; COMMON:  ret
+
+
+define void @CallSt32Call() sanitize_memtag {
 entry:
   %x = alloca i32, align 4
   call void @use32(i32* %x)
@@ -34,7 +111,7 @@ entry:
   ret void
 }
 
-; COMMON:  CallStCall:
+; COMMON:  CallSt32Call:
 ; COMMON:  bl  use32
 
 ; ALWAYS:  str w{{.*}}, [sp]
@@ -44,6 +121,48 @@ entry:
 ; COMMON:  bl  use32
 ; COMMON:  ret
 
+
+define void @CallSt16Call() sanitize_memtag {
+entry:
+  %x = alloca i16, align 4
+  call void @use16(i16* %x)
+  store i16 42, i16* %x
+  call void @use16(i16* %x)
+  ret void
+}
+
+
+; COMMON:  CallSt16Call:
+; COMMON:  bl  use16
+
+; ALWAYS:  strh w{{.*}}, [sp]
+; DEFAULT: strh w{{.*}}, [sp]
+; NEVER:   strh w{{.*}}, [x{{.*}}]
+
+; COMMON:  bl  use16
+; COMMON:  ret
+
+
+define void @CallSt8Call() sanitize_memtag {
+entry:
+  %x = alloca i8, align 4
+  call void @use8(i8* %x)
+  store i8 42, i8* %x
+  call void @use8(i8* %x)
+  ret void
+}
+
+; COMMON:  CallSt8Call:
+; COMMON:  bl  use8
+
+; ALWAYS:  strb w{{.*}}, [sp]
+; DEFAULT: strb w{{.*}}, [sp]
+; NEVER:   strb w{{.*}}, [x{{.*}}]
+
+; COMMON:  bl  use8
+; COMMON:  ret
+
+
 define void @CallStPair(i64 %z) sanitize_memtag {
 entry:
   %x = alloca [2 x i64], align 8


        


More information about the llvm-commits mailing list