[compiler-rt] 5c500c9 - [scudo] Make MTE inline asm compatible with GNU assembler

Roland McGrath via llvm-commits llvm-commits at lists.llvm.org
Tue Apr 13 18:11:51 PDT 2021


Author: Roland McGrath
Date: 2021-04-13T18:11:42-07:00
New Revision: 5c500c9f01aacc0d921db99681c3dc9c4078938c

URL: https://github.com/llvm/llvm-project/commit/5c500c9f01aacc0d921db99681c3dc9c4078938c
DIFF: https://github.com/llvm/llvm-project/commit/5c500c9f01aacc0d921db99681c3dc9c4078938c.diff

LOG: [scudo] Make MTE inline asm compatible with GNU assembler

The GNU assembler can't parse an `.arch_extension ...` directive
that is followed by a `;` on the same line. So uniformly use raw
string literals that put each assembly statement on its own line,
rather than `;` separators, throughout the inline assembly.
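
For illustration, a minimal before/after sketch of the failing and
working forms (it mirrors the first hunk of the diff below; the
standalone statement context is hypothetical):

    // Before: GAS stops parsing at the directive, so the
    // `;`-separated instruction after it is rejected.
    //   __asm__ __volatile__(".arch_extension memtag; msr tco, #1");

    // After: a raw string literal gives each statement its own line,
    // which both GAS and Clang's integrated assembler accept.
    __asm__ __volatile__(
        R"(
        .arch_extension memtag
        msr tco, #1
        )");

An ordinary string literal with embedded `\n` separators (e.g.
".arch_extension memtag\nmsr tco, #1") would also assemble, but the
raw string form keeps multi-line assembly readable.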

Reviewed By: pcc

Differential Revision: https://reviews.llvm.org/D100413

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/memtag.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
index 0a8a0b52173ac..c1b6b99d66f91 100644
--- a/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -96,11 +96,19 @@ inline bool systemDetectsMemoryTagFaultsTestOnly() { return false; }
 #endif // SCUDO_LINUX
 
 inline void disableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension memtag; msr tco, #1");
+  __asm__ __volatile__(
+      R"(
+      .arch_extension memtag
+      msr tco, #1
+      )");
 }
 
 inline void enableMemoryTagChecksTestOnly() {
-  __asm__ __volatile__(".arch_extension memtag; msr tco, #0");
+  __asm__ __volatile__(
+      R"(
+      .arch_extension memtag
+      msr tco, #0
+      )");
 }
 
 class ScopedDisableMemoryTagChecks {
@@ -108,21 +116,33 @@ class ScopedDisableMemoryTagChecks {
 
 public:
   ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension memtag; mrs %0, tco; msr tco, #1"
-                         : "=r"(PrevTCO));
+    __asm__ __volatile__(
+        R"(
+        .arch_extension memtag
+        mrs %0, tco
+        msr tco, #1
+        )"
+        : "=r"(PrevTCO));
   }
 
   ~ScopedDisableMemoryTagChecks() {
-    __asm__ __volatile__(".arch_extension memtag; msr tco, %0"
-                         :
-                         : "r"(PrevTCO));
+    __asm__ __volatile__(
+        R"(
+        .arch_extension memtag
+        msr tco, %0
+        )"
+        :
+        : "r"(PrevTCO));
   }
 };
 
 inline uptr selectRandomTag(uptr Ptr, uptr ExcludeMask) {
   uptr TaggedPtr;
   __asm__ __volatile__(
-      ".arch_extension memtag; irg %[TaggedPtr], %[Ptr], %[ExcludeMask]"
+      R"(
+      .arch_extension memtag
+      irg %[TaggedPtr], %[Ptr], %[ExcludeMask]
+      )"
       : [TaggedPtr] "=r"(TaggedPtr)
       : [Ptr] "r"(Ptr), [ExcludeMask] "r"(ExcludeMask));
   return TaggedPtr;
@@ -156,10 +176,14 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
   // chunk holding a low alignment allocation is reused for a higher alignment
   // allocation, the chunk may already have a non-zero tag from the previous
   // allocation.
-  __asm__ __volatile__(".arch_extension memtag; stg %0, [%0, #-16]"
-                       :
-                       : "r"(Ptr)
-                       : "memory");
+  __asm__ __volatile__(
+      R"(
+      .arch_extension memtag
+      stg %0, [%0, #-16]
+      )"
+      :
+      : "r"(Ptr)
+      : "memory");
 
   uptr TaggedBegin, TaggedEnd;
   setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
@@ -173,10 +197,14 @@ inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
   // purpose of catching linear overflows in this case.
   uptr UntaggedEnd = untagPointer(TaggedEnd);
   if (UntaggedEnd != BlockEnd)
-    __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
-                         :
-                         : "r"(UntaggedEnd)
-                         : "memory");
+    __asm__ __volatile__(
+        R"(
+        .arch_extension memtag
+        stg %0, [%0]
+        )"
+        :
+        : "r"(UntaggedEnd)
+        : "memory");
   return reinterpret_cast<void *>(TaggedBegin);
 }
 
@@ -187,10 +215,14 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
     // of the allocation to 0. See explanation in prepareTaggedChunk above.
     uptr RoundNewPtr = untagPointer(roundUpTo(NewPtr, 16));
     if (RoundNewPtr != BlockEnd)
-      __asm__ __volatile__(".arch_extension memtag; stg %0, [%0]"
-                           :
-                           : "r"(RoundNewPtr)
-                           : "memory");
+      __asm__ __volatile__(
+          R"(
+          .arch_extension memtag
+          stg %0, [%0]
+          )"
+          :
+          : "r"(RoundNewPtr)
+          : "memory");
     return;
   }
 
@@ -220,10 +252,14 @@ inline void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr BlockEnd) {
 
 inline uptr loadTag(uptr Ptr) {
   uptr TaggedPtr = Ptr;
-  __asm__ __volatile__(".arch_extension memtag; ldg %0, [%0]"
-                       : "+r"(TaggedPtr)
-                       :
-                       : "memory");
+  __asm__ __volatile__(
+      R"(
+      .arch_extension memtag
+      ldg %0, [%0]
+      )"
+      : "+r"(TaggedPtr)
+      :
+      : "memory");
   return TaggedPtr;
 }
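
For context, a usage sketch of the RAII helper changed above (the
calling code is hypothetical and not part of the patch; it assumes
the usual scudo namespace):

    {
      // The constructor reads the current PSTATE.TCO (tag check
      // override) into PrevTCO, then sets TCO to suppress MTE
      // tag-check faults for the duration of the scope.
      scudo::ScopedDisableMemoryTagChecks DisableChecks;
      touchPossiblyMistaggedMemory(); // hypothetical work
    } // The destructor writes PrevTCO back, restoring checking.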
 

