[compiler-rt] 4c9adbb - [scudo/standalone] Use .arch_extension memtag, not mte

Roland McGrath via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 8 12:25:00 PST 2021


Author: Roland McGrath
Date: 2021-02-08T12:24:47-08:00
New Revision: 4c9adbb287e7e6cfea866b3c3254b50f21e5ce1f

URL: https://github.com/llvm/llvm-project/commit/4c9adbb287e7e6cfea866b3c3254b50f21e5ce1f
DIFF: https://github.com/llvm/llvm-project/commit/4c9adbb287e7e6cfea866b3c3254b50f21e5ce1f.diff

LOG: [scudo/standalone] Use .arch_extension memtag, not mte

GNU binutils accepts only `.arch_extension memtag` while Clang
accepts either that or `.arch_extension mte` to mean the same thing.

Reviewed By: pcc

Differential Revision: https://reviews.llvm.org/D95996

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/memtag.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
index b1b62065ed72..07f08e1060df 100644
--- a/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -88,11 +88,11 @@ inline bool systemDetectsMemoryTagFaultsTestOnly() { return false; }
 #endif // SCUDO_LINUX
 
 inline void disableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension mte; msr tco, #1");
+  __asm__ __volatile__(".arch_extension memtag; msr tco, #1");
 }
 
 inline void enableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension mte; msr tco, #0");
+  __asm__ __volatile__(".arch_extension memtag; msr tco, #0");
 }
 
 class ScopedDisableMemoryTagChecks {
@@ -100,19 +100,21 @@ class ScopedDisableMemoryTagChecks {
 
 public:
   ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension mte; mrs %0, tco; msr tco, #1"
+    __asm__ __volatile__(".arch_extension memtag; mrs %0, tco; msr tco, #1"
                          : "=r"(PrevTCO));
   }
 
   ~ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension mte; msr tco, %0" : : "r"(PrevTCO));
+    __asm__ __volatile__(".arch_extension memtag; msr tco, %0"
+                         :
+                         : "r"(PrevTCO));
   }
 };
 
 inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
   uptr TaggedPtr;
   __asm__ __volatile__(
-      ".arch_extension mte; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
+      ".arch_extension memtag; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
       : [TaggedPtr] "=r"(TaggedPtr)
       : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
   return TaggedPtr;
@@ -123,7 +125,7 @@ inline uptr storeTags(uptr Begin, uptr End) {
   if (Begin != End) {
     __asm__ __volatile__(
         R"(
-      .arch_extension mte
+      .arch_extension memtag
 
     1:
       stzg %[Cur], [%[Cur]], #16
@@ -144,7 +146,7 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
   // chunk holding a low alignment allocation is reused for a higher alignment
   // allocation, the chunk may already have a non-zero tag from the previous
   // allocation.
-  __asm__ __volatile__(".arch_extension mte; stg %0, [%0, #-16]"
+  __asm__ __volatile__(".arch_extension memtag; stg %0, [%0, #-16]"
                        :
                        : "r"(Ptr)
                        : "memory");
@@ -161,7 +163,7 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
   // purpose of catching linear overflows in this case.
   uptr UntaggedEnd = untagPointer(TaggedEnd);
   if (UntaggedEnd != BlockEnd)
-    __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
+    __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
                          :
                          : "r"(UntaggedEnd)
                          : "memory");
@@ -175,7 +177,7 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
     // of the allocation to 0. See explanation in prepareTaggedChunk above.
     uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
     if (RoundNewPtr != BlockEnd)
-      __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
+      __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
                            :
                            : "r"(RoundNewPtr)
                            : "memory");
@@ -183,7 +185,7 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
   }
 
   __asm__ __volatile__(R"(
-    .arch_extension mte
+    .arch_extension memtag
 
     // Set the memory tag of the region
     // [roundUpTo(OldPtr, 16), roundUpTo(NewPtr, 16))
@@ -208,7 +210,7 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
 
 inline uptr loadTag(uptr Ptr) {
   uptr TaggedPtr = Ptr;
-  __asm__ __volatile__(".arch_extension mte; ldg %0, [%0]"
+  __asm__ __volatile__(".arch_extension memtag; ldg %0, [%0]"
                        : "+r"(TaggedPtr)
                        :
                        : "memory");


        


More information about the llvm-commits mailing list