[llvm] [ARMAsmBackend] Add checks for relocation addends in assembler (PR #109969)

via llvm-commits llvm-commits at lists.llvm.org
Sun Sep 29 01:49:50 PDT 2024


https://github.com/jcohen-apple updated https://github.com/llvm/llvm-project/pull/109969

>From 218bd0c1db88337a076edef6e0749d47c20d94d6 Mon Sep 17 00:00:00 2001
From: Jonathan Cohen <jcohen22 at apple.com>
Date: Tue, 24 Sep 2024 17:28:47 +0300
Subject: [PATCH 1/2] [ARMAsmBackend] Add tests for invalid relocation values

The 32-bit ARM backend does not perform range or alignment checks on constant addends that can be added to relocations.
---
 .../Target/ARM/MCTargetDesc/ARMAsmBackend.cpp |  5 ++--
 .../MC/ARM/macho-relocs-with-addend-invalid.s | 28 +++++++++++++++++++
 2 files changed, 31 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/MC/ARM/macho-relocs-with-addend-invalid.s

diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index 9f6bc31f8aa030..df35b0b05d66c0 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -1121,12 +1121,12 @@ void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
   assert(Offset + NumBytes <= Data.size() && "Invalid fixup offset!");
 
   // Used to point to big endian bytes.
-  unsigned FullSizeBytes;
+  unsigned FullSizeBytes = 0;
   if (Endian == llvm::endianness::big) {
     FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
     assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
     assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
-  }
+  } 
 
   // For each byte of the fragment that the fixup touches, mask in the bits from
   // the fixup value. The Value has been "split up" into the appropriate
@@ -1135,6 +1135,7 @@ void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
     unsigned Idx =
         Endian == llvm::endianness::little ? i : (FullSizeBytes - 1 - i);
     Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
+
   }
 }
 
diff --git a/llvm/test/MC/ARM/macho-relocs-with-addend-invalid.s b/llvm/test/MC/ARM/macho-relocs-with-addend-invalid.s
new file mode 100644
index 00000000000000..0c373fb67a1643
--- /dev/null
+++ b/llvm/test/MC/ARM/macho-relocs-with-addend-invalid.s
@@ -0,0 +1,28 @@
+// RUN: llvm-mc -triple armv7-apple-darwin -filetype=obj %s | llvm-objdump -d --macho - | FileCheck %s
+
+_foo:
+    // First issue: relocation addend range not checked, silently truncated
+    // CHECK: bl      0xffffff00
+    // CHECK: blx     0xffffff00
+    // CHECK: b       0xffffff00
+    // CHECK: ble     0xffffff00
+    // CHECK: beq     0xffffff00
+
+    bl  _foo+0xfffffff00
+    blx _foo+0xfffffff00
+    b   _foo+0xfffffff00
+    ble _foo+0xfffffff00
+    beq _foo+0xfffffff00
+
+    // Second bug: relocation addend alignment not checked, silently rounded
+    // CHECK: bl      0x100
+    // CHECK: blx     0x100
+    // CHECK: b       0x100
+    // CHECK: ble     0x100
+    // CHECK: beq     0x100
+    
+    bl  _foo+0x101
+    blx _foo+0x101
+    b   _foo+0x101
+    ble _foo+0x101
+    beq _foo+0x101

>From 458238b7f5dde04e7c334ad08a7fd2d977612318 Mon Sep 17 00:00:00 2001
From: Jonathan Cohen <jcohen22 at apple.com>
Date: Wed, 25 Sep 2024 13:37:22 +0300
Subject: [PATCH 2/2] [ARMAsmBackend] Add range and alignment checks for armv7
 branches

Currently, the assembler silently truncates and rounds the relocation values it is given, producing branch instructions with a bad offset. After this change, the assembler fails with an error if the relocation value cannot be encoded into the instruction.
---
 .../Target/ARM/MCTargetDesc/ARMAsmBackend.cpp | 16 +++++++++--
 .../MC/ARM/macho-relocs-with-addend-invalid.s | 28 +++++++++----------
 llvm/test/MC/ARM/macho-relocs-with-addend.s   |  4 +--
 3 files changed, 29 insertions(+), 19 deletions(-)

diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
index df35b0b05d66c0..55f55a49e2eaa9 100644
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -579,13 +579,24 @@ unsigned ARMAsmBackend::adjustFixupValue(const MCAssembler &Asm,
   case ARM::fixup_arm_uncondbl:
   case ARM::fixup_arm_condbl:
   case ARM::fixup_arm_blx:
+    // Check that the fixup value is legal.
+    Value = Value - 8;
+    if (!isInt<25>(Value)) {
+      Ctx.reportError(Fixup.getLoc(), "Relocation out of range");
+      return 0;
+    }
+    if (Value % 4 != 0) {
+      Ctx.reportError(Fixup.getLoc(), "Relocation not aligned");
+      return 0;
+    }
+
     // These values don't encode the low two bits since they're always zero.
     // Offset by 8 just as above.
     if (const MCSymbolRefExpr *SRE =
             dyn_cast<MCSymbolRefExpr>(Fixup.getValue()))
       if (SRE->getKind() == MCSymbolRefExpr::VK_TLSCALL)
         return 0;
-    return 0xffffff & ((Value - 8) >> 2);
+    return 0xffffff & (Value >> 2);
   case ARM::fixup_t2_uncondbranch: {
     if (STI->getTargetTriple().isOSBinFormatCOFF() && !IsResolved &&
         Value != 4) {
@@ -1126,7 +1137,7 @@ void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
     FullSizeBytes = getFixupKindContainerSizeBytes(Kind);
     assert((Offset + FullSizeBytes) <= Data.size() && "Invalid fixup size!");
     assert(NumBytes <= FullSizeBytes && "Invalid fixup size!");
-  } 
+  }
 
   // For each byte of the fragment that the fixup touches, mask in the bits from
   // the fixup value. The Value has been "split up" into the appropriate
@@ -1135,7 +1146,6 @@ void ARMAsmBackend::applyFixup(const MCAssembler &Asm, const MCFixup &Fixup,
     unsigned Idx =
         Endian == llvm::endianness::little ? i : (FullSizeBytes - 1 - i);
     Data[Offset + Idx] |= uint8_t((Value >> (i * 8)) & 0xff);
-
   }
 }
 
diff --git a/llvm/test/MC/ARM/macho-relocs-with-addend-invalid.s b/llvm/test/MC/ARM/macho-relocs-with-addend-invalid.s
index 0c373fb67a1643..11082ab0aebdbd 100644
--- a/llvm/test/MC/ARM/macho-relocs-with-addend-invalid.s
+++ b/llvm/test/MC/ARM/macho-relocs-with-addend-invalid.s
@@ -1,28 +1,28 @@
-// RUN: llvm-mc -triple armv7-apple-darwin -filetype=obj %s | llvm-objdump -d --macho - | FileCheck %s
+// RUN: not llvm-mc -triple armv7-apple-darwin -filetype=obj %s 2>&1 | FileCheck %s
 
 _foo:
-    // First issue: relocation addend range not checked, silently truncated
-    // CHECK: bl      0xffffff00
-    // CHECK: blx     0xffffff00
-    // CHECK: b       0xffffff00
-    // CHECK: ble     0xffffff00
-    // CHECK: beq     0xffffff00
+    // Check that the relocation size is valid.
 
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation out of range
     bl  _foo+0xfffffff00
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation out of range
     blx _foo+0xfffffff00
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation out of range
     b   _foo+0xfffffff00
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation out of range
     ble _foo+0xfffffff00
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation out of range
     beq _foo+0xfffffff00
 
-    // Second bug: relocation addend alignment not checked, silently rounded
-    // CHECK: bl      0x100
-    // CHECK: blx     0x100
-    // CHECK: b       0x100
-    // CHECK: ble     0x100
-    // CHECK: beq     0x100
-    
+    // Check that the relocation alignment is valid.
+
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation not aligned
     bl  _foo+0x101
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation not aligned
     blx _foo+0x101
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation not aligned
     b   _foo+0x101
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation not aligned
     ble _foo+0x101
+    // CHECK: :[[@LINE+1]]:{{[0-9]+}}: error: Relocation not aligned
     beq _foo+0x101
diff --git a/llvm/test/MC/ARM/macho-relocs-with-addend.s b/llvm/test/MC/ARM/macho-relocs-with-addend.s
index fee930eee1b3b5..f5421baf610863 100644
--- a/llvm/test/MC/ARM/macho-relocs-with-addend.s
+++ b/llvm/test/MC/ARM/macho-relocs-with-addend.s
@@ -15,9 +15,9 @@ _with_thumb:
         .globl _with_arm
         .arm
 _with_arm:
-        bl _dest+10
+        bl _dest+12
         blx _dest+20
-        bne _dest+30
+        bne _dest+32
         b _dest+40
 
         .data



More information about the llvm-commits mailing list