[lld] 210cc07 - [mac/lld] Fix scale computation for vector ops in PAGEOFF12 relocations

Nico Weber via llvm-commits <llvm-commits@lists.llvm.org>
Fri Mar 5 09:24:51 PST 2021


Author: Nico Weber
Date: 2021-03-05T12:24:37-05:00
New Revision: 210cc0738bbeecf97c9698e2bbe54bbb7d520387

URL: https://github.com/llvm/llvm-project/commit/210cc0738bbeecf97c9698e2bbe54bbb7d520387
DIFF: https://github.com/llvm/llvm-project/commit/210cc0738bbeecf97c9698e2bbe54bbb7d520387.diff

LOG: [mac/lld] Fix scale computation for vector ops in PAGEOFF12 relocations

With this, llvm-tblgen no longer tries and fails to allocate 7953 petabytes
when it runs during the build. Instead, `check-llvm` with lld/mac as the host
linker now completes without any failures on an M1 Mac.

This vector op handling code matches what happens in:
- ld64's OutputFile::applyFixUps() in OutputFile.cpp for kindStoreARM64PageOff12
- lld.ld64.darwinold's offset12KindFromInstruction() in
  lld/lib/ReaderWriter/MachO/ArchHandler_arm64.cpp for offset12scale16
- RuntimeDyld's decodeAddend() in
  llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h for
  ARM64_RELOC_PAGEOFF12

Fixes PR49444.
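
The scale here is the log2 of the access size that the instruction's imm12
field is implicitly multiplied by. For regular loads/stores it comes straight
from the size field (bits 31:30), but 128-bit SIMD&FP accesses encode
size == 00 and instead set the V bit (26) together with the high opc bit (23),
which is what the new check looks for. A minimal standalone sketch of the same
bit tests (not lld's helper itself; the two instruction words in the asserts
are plain `ldr q0, [x3]` / `ldr x2, [x2]` encodings, used only for
illustration):

  #include <cassert>
  #include <cstdint>

  // Same bit tests as the new encodePageOff12() code, pulled out for illustration.
  static int pageOff12Scale(uint32_t insn) {
    int scale = 0;
    if ((insn & 0x3b00'0000) == 0x3900'0000) { // load/store (unsigned imm12)
      scale = insn >> 30;                      // size field: 1/2/4/8-byte access
      if (scale == 0 && (insn & 0x0480'0000) == 0x0480'0000)
        scale = 4;                             // 128-bit vector (q-register) access
    }
    return scale;
  }

  int main() {
    assert(pageOff12Scale(0x3dc00060) == 4); // ldr q0, [x3]: offset scaled by 16
    assert(pageOff12Scale(0xf9400042) == 3); // ldr x2, [x2]: offset scaled by 8
    return 0;
  }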

Differential Revision: https://reviews.llvm.org/D98053

Added: 
    

Modified: 
    lld/MachO/Arch/ARM64.cpp
    lld/test/MachO/arm64-relocs.s

Removed: 
    


################################################################################
diff --git a/lld/MachO/Arch/ARM64.cpp b/lld/MachO/Arch/ARM64.cpp
index 18139503877e..a8796db8a7ca 100644
--- a/lld/MachO/Arch/ARM64.cpp
+++ b/lld/MachO/Arch/ARM64.cpp
@@ -127,8 +127,14 @@ inline uint64_t encodePage21(uint64_t base, uint64_t va) {
 // |                   |         imm12         |                   |
 // +-------------------+-----------------------+-------------------+
 
-inline uint64_t encodePageOff12(uint64_t base, uint64_t va) {
-  int scale = ((base & 0x3b000000) == 0x39000000) ? base >> 30 : 0;
+inline uint64_t encodePageOff12(uint32_t base, uint64_t va) {
+  int scale = 0;
+  if ((base & 0x3b00'0000) == 0x3900'0000) { // load/store
+    scale = base >> 30;
+    if (scale == 0 && (base & 0x0480'0000) == 0x0480'0000) // vector op?
+      scale = 4;
+  }
+
   // TODO(gkm): extract embedded addend and warn if != 0
   // uint64_t addend = ((base & 0x003FFC00) >> 10);
   return (base | bitField(va, scale, 12 - scale, 10));

diff --git a/lld/test/MachO/arm64-relocs.s b/lld/test/MachO/arm64-relocs.s
index 1ce5e10bd0be..5f22479bb068 100644
--- a/lld/test/MachO/arm64-relocs.s
+++ b/lld/test/MachO/arm64-relocs.s
@@ -15,6 +15,8 @@
 ## PAGE21 relocations are aligned to 4096 bytes
 # CHECK-NEXT:  adrp	x2, [[#]] ; 0x[[#BAZ+4096-128]]
 # CHECK-NEXT:  ldr	x2, [x2, #128]
+# CHECK-NEXT:  adrp     x3, 8 ; 0x8000
+# CHECK-NEXT:  ldr      q0, [x3, #144]
 # CHECK-NEXT:  ret
 
 # CHECK-LABEL: Contents of (__DATA_CONST,__const) section
@@ -22,7 +24,7 @@
 # CHECK:       [[#PTR_2]]	{{0*}}[[#BAZ+123]] 00000000 00000000 00000000
 
 .text
-.globl _foo, _bar, _baz
+.globl _foo, _bar, _baz, _quux
 .p2align 2
 _foo:
   ## Generates ARM64_RELOC_BRANCH26 and ARM64_RELOC_ADDEND
@@ -31,6 +33,11 @@ _foo:
  adrp x2, _baz@PAGE + 4097
   ## Generates ARM64_RELOC_PAGEOFF12
  ldr x2, [x2, _baz@PAGEOFF]
+
+  ## Generates ARM64_RELOC_PAGE21
+  adrp x3, _quux@PAGE
+  ## Generates ARM64_RELOC_PAGEOFF12 with internal slide 4
+  ldr q0, [x3, _quux@PAGEOFF]
   ret
 
 .p2align 2
@@ -42,6 +49,11 @@ _bar:
 _baz:
 .space 1
 
+.p2align 4
+_quux:
+.quad 0
+.quad 80
+
 .section __DATA_CONST,__const
 ## These generate ARM64_RELOC_UNSIGNED symbol relocations. llvm-mc seems to
 ## generate UNSIGNED section relocations only for compact unwind sections, so

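As a rough illustration of what the fixed encodePageOff12() now produces for
the new `ldr q0, [x3, _quux@PAGEOFF]` test line (a sketch, not lld's code; the
local bitField() below is a stand-in assumed to behave like the helper called
in the diff, placing `width` bits of `value` taken from bit `right` at bit
position `left`):

  #include <cstdint>
  #include <cstdio>

  // Stand-in for the bitField() helper used by encodePageOff12() above.
  static uint64_t bitField(uint64_t value, int right, int width, int left) {
    return ((value >> right) & ((1ull << width) - 1)) << left;
  }

  int main() {
    uint32_t base = 0x3dc00060; // ldr q0, [x3, #0] before relocation
    uint64_t va = 0x8090;       // _quux: page offset 0x90, 16-byte aligned
    int scale = 4;              // 128-bit vector access, per the new code
    uint32_t insn = base | (uint32_t)bitField(va, scale, 12 - scale, 10);
    // imm12 = 0x90 >> 4 = 9; the disassembler scales it back up to "#144".
    printf("0x%08x imm12=%u\n", insn, (insn >> 10) & 0xfffu);
    return 0;
  }

With the old scale of 0 the unscaled byte offset would have landed in imm12,
and the CPU would still multiply it by 16 at execution time, so the vector
load would read from the wrong address, which is presumably how llvm-tblgen
ended up asking for the absurd allocation sizes mentioned in the log above.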
