[llvm] 2c63e8e - [AArch64] Fix a bug with jump table generation

Cullen Rhodes via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 6 06:33:23 PST 2019


Author: Cullen Rhodes
Date: 2019-12-06T14:31:53Z
New Revision: 2c63e8e36df071e62feb7ac9c948d6fdd54e0c35

URL: https://github.com/llvm/llvm-project/commit/2c63e8e36df071e62feb7ac9c948d6fdd54e0c35
DIFF: https://github.com/llvm/llvm-project/commit/2c63e8e36df071e62feb7ac9c948d6fdd54e0c35.diff

LOG: [AArch64] Fix a bug with jump table generation

Summary:
When trying to calculate the offsets for the jump table entries
we fail to take into account the block alignment, which could be
greater than 4 bytes. This led to cases where the jump table
offset was too big to fit in a byte.

Reviewers: t.p.northover, sdesmalen, ostannard

Reviewed By: ostannard

Subscribers: ostannard, kristof.beyls, hiraditya, llvm-commits

Committed on behalf of David Sherwood (david-arm)

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D70533

Added: 
    llvm/test/CodeGen/AArch64/jti-correct-datatype.mir

Modified: 
    llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
    llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp b/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
index 48dab79b32d3..259238705965 100644
--- a/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
+++ b/llvm/lib/Target/AArch64/AArch64CompressJumpTables.cpp
@@ -20,6 +20,7 @@
 #include "llvm/CodeGen/TargetInstrInfo.h"
 #include "llvm/CodeGen/TargetSubtargetInfo.h"
 #include "llvm/MC/MCContext.h"
+#include "llvm/Support/Alignment.h"
 #include "llvm/Support/Debug.h"
 
 using namespace llvm;
@@ -74,10 +75,16 @@ void AArch64CompressJumpTables::scanFunction() {
   BlockInfo.clear();
   BlockInfo.resize(MF->getNumBlockIDs());
 
-  int Offset = 0;
+  unsigned Offset = 0;
   for (MachineBasicBlock &MBB : *MF) {
-    BlockInfo[MBB.getNumber()] = Offset;
-    Offset += computeBlockSize(MBB);
+    const Align Alignment = MBB.getAlignment();
+    unsigned AlignedOffset;
+    if (Alignment == Align::None())
+      AlignedOffset = Offset;
+    else
+      AlignedOffset = alignTo(Offset, Alignment);
+    BlockInfo[MBB.getNumber()] = AlignedOffset;
+    Offset = AlignedOffset + computeBlockSize(MBB);
   }
 }
 

diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
index 21ce5785ea5e..9db746733aa3 100644
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -150,6 +150,19 @@ static unsigned AdrImmBits(unsigned Value) {
   return (hi19 << 5) | (lo2 << 29);
 }
 
+static bool valueFitsIntoFixupKind(unsigned Kind, uint64_t Value) {
+  unsigned NumBits;
+  switch(Kind) {
+  case FK_Data_1: NumBits = 8; break;
+  case FK_Data_2: NumBits = 16; break;
+  case FK_Data_4: NumBits = 32; break;
+  case FK_Data_8: NumBits = 64; break;
+  default: return true;
+  }
+  return isUIntN(NumBits, Value) ||
+    isIntN(NumBits, static_cast<int64_t>(Value));
+}
+
 static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
                                  uint64_t Value, MCContext &Ctx,
                                  const Triple &TheTriple, bool IsResolved) {
@@ -309,11 +322,14 @@ static uint64_t adjustFixupValue(const MCFixup &Fixup, const MCValue &Target,
     if (Value & 0x3)
       Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
     return (Value >> 2) & 0x3ffffff;
-  case FK_NONE:
   case FK_Data_1:
   case FK_Data_2:
   case FK_Data_4:
   case FK_Data_8:
+    if (!valueFitsIntoFixupKind(Fixup.getTargetKind(), Value))
+      Ctx.reportError(Fixup.getLoc(), "fixup value too large for data type!");
+    LLVM_FALLTHROUGH;
+  case FK_NONE:
   case FK_SecRel_2:
   case FK_SecRel_4:
     return Value;

diff --git a/llvm/test/CodeGen/AArch64/jti-correct-datatype.mir b/llvm/test/CodeGen/AArch64/jti-correct-datatype.mir
new file mode 100644
index 000000000000..aeb815c99d00
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/jti-correct-datatype.mir
@@ -0,0 +1,83 @@
+# RUN: llc -mtriple=aarch64-linux-gnu -start-after=branch-relaxation --filetype=obj -o %t.o %s
+
+--- |
+  target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+  target triple = "aarch64-unknown-linux-gnu"
+
+  @reps = external dso_local global i32, align 4
+
+  define void @foo() {
+    ret void
+  }
+
+  !2 = !{!3, !3, i64 0}
+  !3 = !{!"int", !4, i64 0}
+  !4 = !{!"omnipotent char", !5, i64 0}
+  !5 = !{!"Simple C++ TBAA"}
+...
+---
+name:            foo
+alignment:       64
+jumpTable:
+  kind:            block-address
+  entries:
+    - id:              0
+      blocks:          [ '%bb.2', '%bb.3', '%bb.4', '%bb.5' ]
+body:             |
+  bb.0:
+    successors: %bb.6(0x19999998), %bb.1(0x66666668)
+    liveins: $w0, $x1, $x2, $x3, $x4
+  
+    dead $wzr = SUBSWri renamable $w0, 3, 0, implicit-def $nzcv
+    Bcc 8, %bb.6, implicit $nzcv
+  
+  bb.1:
+    successors: %bb.2(0x20000000), %bb.3(0x20000000), %bb.4(0x20000000), %bb.5(0x20000000)
+    liveins: $w0, $x1, $x2, $x3, $x4
+  
+    $x8 = ADRP target-flags(aarch64-page) %jump-table.0
+    renamable $w9 = ORRWrs $wzr, killed renamable $w0, 0, implicit-def $x9
+    renamable $x8 = ADDXri killed $x8, target-flags(aarch64-pageoff, aarch64-nc) %jump-table.0, 0
+    early-clobber renamable $x10, dead early-clobber renamable $x11 = JumpTableDest32 killed renamable $x8, killed renamable $x9, %jump-table.0
+    BR killed renamable $x10
+  
+  bb.2 (align 65536):
+    successors: %bb.3(0x50000000), %bb.6(0x30000000)
+    liveins: $x1, $x2, $x3, $x4
+  
+    renamable $x8 = ADRP target-flags(aarch64-page) @reps
+    renamable $w9 = LDRWui renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) @reps :: (volatile dereferenceable load 4 from @reps, !tbaa !2)
+    dead $wzr = SUBSWri killed renamable $w9, 1, 0, implicit-def $nzcv
+    Bcc 11, %bb.6, implicit $nzcv
+  
+  bb.3 (align 65536):
+    successors: %bb.4(0x50000000), %bb.6(0x30000000)
+    liveins: $x1, $x2, $x3, $x4
+  
+    renamable $x8 = ADRP target-flags(aarch64-page) @reps
+    renamable $w9 = LDRWui renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) @reps :: (volatile dereferenceable load 4 from @reps, !tbaa !2)
+    dead $wzr = SUBSWri killed renamable $w9, 1, 0, implicit-def $nzcv
+    Bcc 11, %bb.6, implicit $nzcv
+  
+  bb.4 (align 65536):
+    successors: %bb.5(0x50000000), %bb.6(0x30000000)
+    liveins: $x1, $x2, $x3, $x4
+  
+    renamable $x8 = ADRP target-flags(aarch64-page) @reps
+    renamable $w9 = LDRWui renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) @reps :: (volatile dereferenceable load 4 from @reps, !tbaa !2)
+    dead $wzr = SUBSWri killed renamable $w9, 1, 0, implicit-def $nzcv
+    Bcc 11, %bb.6, implicit $nzcv
+  
+  bb.5 (align 65536):
+    successors: %bb.6(0x30000000)
+    liveins: $x1, $x2, $x3, $x4
+  
+    renamable $x8 = ADRP target-flags(aarch64-page) @reps
+    renamable $w9 = LDRWui renamable $x8, target-flags(aarch64-pageoff, aarch64-nc) @reps :: (volatile dereferenceable load 4 from @reps, !tbaa !2)
+    dead $wzr = SUBSWri killed renamable $w9, 1, 0, implicit-def $nzcv
+    Bcc 11, %bb.6, implicit $nzcv
+  
+  bb.6:
+    RET undef $lr
+
+...


        


More information about the llvm-commits mailing list