[llvm] ecbf32d - [AArch64] Async unwind - Adjust unwind info in AArch64LoadStoreOptimizer

Momchil Velikov via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 13 09:25:37 PDT 2022


Author: Momchil Velikov
Date: 2022-04-13T17:04:53+01:00
New Revision: ecbf32dd88fc91b4fe709dc14bb3493dda6e8854

URL: https://github.com/llvm/llvm-project/commit/ecbf32dd88fc91b4fe709dc14bb3493dda6e8854
DIFF: https://github.com/llvm/llvm-project/commit/ecbf32dd88fc91b4fe709dc14bb3493dda6e8854.diff

LOG: [AArch64] Async unwind - Adjust unwind info in AArch64LoadStoreOptimizer

The AArch64LoadStoreOptimizer pass may merge a register
increment/decrement with a following memory operation. In doing so, it
may break CFI by moving a stack pointer adjustment past the CFI
instruction that describes *that* adjustment.

This patch fixes this issue by moving said CFI instruction after the
merged instruction, where the SP increment/decrement actually takes
place.

Reviewed By: efriedma

Differential Revision: https://reviews.llvm.org/D114547

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
    llvm/test/CodeGen/AArch64/arm64-fp128.ll
    llvm/test/CodeGen/AArch64/fcopysign.ll
    llvm/test/CodeGen/AArch64/swifttail-call.ll
    llvm/test/CodeGen/AArch64/tail-call.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index ccc6eff319e08..a7a1171026025 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -9,6 +9,12 @@
 // This file contains a pass that performs load / store related peephole
 // optimizations. This pass should be run after register allocation.
 //
+// The pass runs after the PrologEpilogInserter, where we emit the CFI
+// instructions. In order to preserve the correctness of the unwind information,
+// the pass should not change the relative order of any two instructions if one
+// of them has the FrameSetup/FrameDestroy flag or, alternatively, it should
+// apply an ad-hoc fix to the unwind information.
+//
 //===----------------------------------------------------------------------===//
 
 #include "AArch64InstrInfo.h"
@@ -31,6 +37,7 @@
 #include "llvm/CodeGen/TargetRegisterInfo.h"
 #include "llvm/IR/DebugLoc.h"
 #include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCDwarf.h"
 #include "llvm/MC/MCRegisterInfo.h"
 #include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
@@ -1762,6 +1769,26 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
   return E;
 }
 
+static MachineBasicBlock::iterator
+maybeMoveCFI(MachineInstr &MI, MachineBasicBlock::iterator MaybeCFI) {
+  if (MaybeCFI->getOpcode() != TargetOpcode::CFI_INSTRUCTION ||
+      !(MI.getFlag(MachineInstr::FrameSetup) ||
+        MI.getFlag(MachineInstr::FrameDestroy)) ||
+      getLdStBaseOp(MI).getReg() != AArch64::SP)
+    return MI.getParent()->end();
+
+  const MachineFunction &MF = *MI.getParent()->getParent();
+  unsigned CFIIndex = MaybeCFI->getOperand(0).getCFIIndex();
+  const MCCFIInstruction &CFI = MF.getFrameInstructions()[CFIIndex];
+  switch (CFI.getOperation()) {
+  case MCCFIInstruction::OpDefCfa:
+  case MCCFIInstruction::OpDefCfaOffset:
+    return MaybeCFI;
+  default:
+    return MI.getParent()->end();
+  }
+}
+
 MachineBasicBlock::iterator
 AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
                                      MachineBasicBlock::iterator Update,
@@ -1771,6 +1798,12 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
          "Unexpected base register update instruction to merge!");
   MachineBasicBlock::iterator E = I->getParent()->end();
   MachineBasicBlock::iterator NextI = next_nodbg(I, E);
+
+  // If updating SP and the following instruction is a CFA offset related CFI
+  // instruction, move the CFI after the merged instruction.
+  MachineBasicBlock::iterator CFI =
+      IsPreIdx ? maybeMoveCFI(*Update, next_nodbg(Update, E)) : E;
+
   // Return the instruction following the merged instruction, which is
   // the instruction following our unmerged load. Unless that's the add/sub
   // instruction we're merging, in which case it's the one after that.
@@ -1808,7 +1841,10 @@ AArch64LoadStoreOpt::mergeUpdateInsn(MachineBasicBlock::iterator I,
               .setMemRefs(I->memoperands())
               .setMIFlags(I->mergeFlagsWith(*Update));
   }
-  (void)MIB;
+  if (CFI != E) {
+    MachineBasicBlock *MBB = I->getParent();
+    MBB->splice(std::next(MIB.getInstr()->getIterator()), MBB, CFI);
+  }
 
   if (IsPreIdx) {
     ++NumPreFolded;

diff --git a/llvm/test/CodeGen/AArch64/arm64-fp128.ll b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
index 597bc58d4aab5..5a25f128889cf 100644
--- a/llvm/test/CodeGen/AArch64/arm64-fp128.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-fp128.ll
@@ -420,8 +420,8 @@ define dso_local void @test_extend() {
 define fp128 @test_neg(fp128 %in) {
 ; CHECK-LABEL: test_neg:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    str q0, [sp, #-16]!
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldrb w8, [sp, #15]
 ; CHECK-NEXT:    eor w8, w8, #0x80
 ; CHECK-NEXT:    strb w8, [sp, #15]

diff --git a/llvm/test/CodeGen/AArch64/fcopysign.ll b/llvm/test/CodeGen/AArch64/fcopysign.ll
index ff93ff77a1a0a..d633b641e0fe8 100644
--- a/llvm/test/CodeGen/AArch64/fcopysign.ll
+++ b/llvm/test/CodeGen/AArch64/fcopysign.ll
@@ -16,11 +16,11 @@ declare half @llvm.copysign.f16(half %a, half %b)
 define fp128 @copysign0() {
 ; CHECK-LABEL: copysign0:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    adrp x8, .LCPI0_0
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
 ; CHECK-NEXT:    adrp x8, val_double
 ; CHECK-NEXT:    str q0, [sp, #-16]!
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr x8, [x8, :lo12:val_double]
 ; CHECK-NEXT:    ldrb w9, [sp, #15]
 ; CHECK-NEXT:    and x8, x8, #0x8000000000000000
@@ -32,11 +32,11 @@ define fp128 @copysign0() {
 ;
 ; CHECK-NONEON-LABEL: copysign0:
 ; CHECK-NONEON:       // %bb.0: // %entry
-; CHECK-NONEON-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NONEON-NEXT:    adrp x8, .LCPI0_0
 ; CHECK-NONEON-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
 ; CHECK-NONEON-NEXT:    adrp x8, val_double
 ; CHECK-NONEON-NEXT:    str q0, [sp, #-16]!
+; CHECK-NONEON-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NONEON-NEXT:    ldr x8, [x8, :lo12:val_double]
 ; CHECK-NONEON-NEXT:    ldrb w9, [sp, #15]
 ; CHECK-NONEON-NEXT:    and x8, x8, #0x8000000000000000
@@ -55,11 +55,11 @@ entry:
 define fp128 @copysign1() {
 ; CHECK-LABEL: copysign1:
 ; CHECK:       // %bb.0: // %entry
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    adrp x8, val_fp128
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:val_fp128]
 ; CHECK-NEXT:    adrp x8, val_float
 ; CHECK-NEXT:    str q0, [sp, #-16]!
+; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    ldr w8, [x8, :lo12:val_float]
 ; CHECK-NEXT:    ldrb w9, [sp, #15]
 ; CHECK-NEXT:    and w8, w8, #0x80000000
@@ -71,11 +71,11 @@ define fp128 @copysign1() {
 ;
 ; CHECK-NONEON-LABEL: copysign1:
 ; CHECK-NONEON:       // %bb.0: // %entry
-; CHECK-NONEON-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NONEON-NEXT:    adrp x8, val_fp128
 ; CHECK-NONEON-NEXT:    ldr q0, [x8, :lo12:val_fp128]
 ; CHECK-NONEON-NEXT:    adrp x8, val_float
 ; CHECK-NONEON-NEXT:    str q0, [sp, #-16]!
+; CHECK-NONEON-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NONEON-NEXT:    ldr w8, [x8, :lo12:val_float]
 ; CHECK-NONEON-NEXT:    ldrb w9, [sp, #15]
 ; CHECK-NONEON-NEXT:    and w8, w8, #0x80000000

diff --git a/llvm/test/CodeGen/AArch64/swifttail-call.ll b/llvm/test/CodeGen/AArch64/swifttail-call.ll
index 5b612ab40008a..3efd9f98f40dc 100644
--- a/llvm/test/CodeGen/AArch64/swifttail-call.ll
+++ b/llvm/test/CodeGen/AArch64/swifttail-call.ll
@@ -36,6 +36,7 @@ define swifttailcc void @caller_to8_from0() #0 {
   ret void
 
 ; COMMON: str {{x[0-9]+}}, [sp, #-16]!
+; COMMON-NEXT: .cfi_def_cfa_offset 16
 ; COMMON-NEXT: b callee_stack8
 }
 

diff --git a/llvm/test/CodeGen/AArch64/tail-call.ll b/llvm/test/CodeGen/AArch64/tail-call.ll
index 83943801344b6..1a5599fcc508b 100644
--- a/llvm/test/CodeGen/AArch64/tail-call.ll
+++ b/llvm/test/CodeGen/AArch64/tail-call.ll
@@ -36,6 +36,7 @@ define fastcc void @caller_to8_from0() #0 {
   ret void
 
 ; COMMON: str {{x[0-9]+}}, [sp, #-16]!
+; COMMON-NEXT: .cfi_def_cfa_offset 16
 ; COMMON-NEXT: b callee_stack8
 }
 


        

