[llvm] 0448d11 - [AArch64][GlobalISel] Don't emit a branch for a fallthrough G_BR at -O0.

Amara Emerson via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 10 15:01:47 PDT 2020


Author: Amara Emerson
Date: 2020-09-10T15:01:26-07:00
New Revision: 0448d11a06b451a63a8f60408fec613ad24801ba

URL: https://github.com/llvm/llvm-project/commit/0448d11a06b451a63a8f60408fec613ad24801ba
DIFF: https://github.com/llvm/llvm-project/commit/0448d11a06b451a63a8f60408fec613ad24801ba.diff

LOG: [AArch64][GlobalISel] Don't emit a branch for a fallthrough G_BR at -O0.

With optimizations enabled we leave the decision to eliminate fallthrough
branches to block placement, but at -O0 we should do it in the selector to
save code size.

This case had regressed at -O0 after a recent change to a combiner.
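
As a rough illustration (a hypothetical MIR snippet, not taken from the test
suite), a G_BR whose target is the next block in layout order now selects to
nothing at -O0 instead of producing a B instruction:

  bb.0:
    successors: %bb.1(0x80000000)
    %0:gpr(s32) = COPY $w0
    G_BR %bb.1                      ; target is the layout successor
  bb.1:
    $w0 = COPY %0(s32)
    RET_ReallyLR implicit $w0

At -O0 the selector simply erases the G_BR, so no 'B %bb.1' is emitted for
bb.0; with optimizations enabled the branch is still selected and left for
MachineBlockPlacement to remove.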

Added: 
    

Modified: 
    llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
    llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt-constrain.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
    llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir
    llvm/test/CodeGen/AArch64/unwind-preserved.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
index 33fb9b7287d5..aa155e18e110 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64InstructionSelector.cpp
@@ -35,6 +35,7 @@
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/Type.h"
 #include "llvm/IR/IntrinsicsAArch64.h"
+#include "llvm/Pass.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 
@@ -1755,6 +1756,18 @@ bool AArch64InstructionSelector::earlySelect(MachineInstr &I) const {
   MachineRegisterInfo &MRI = MF.getRegInfo();
 
   switch (I.getOpcode()) {
+  case TargetOpcode::G_BR: {
+    // If the branch jumps to the fallthrough block, don't bother emitting it.
+    // Only do this for -O0 for a good code size improvement, because when
+    // optimizations are enabled we want to leave this choice to
+    // MachineBlockPlacement.
+    Function &F = MF.getFunction();
+    bool EnableOpt = MF.getTarget().getOptLevel() != CodeGenOpt::None;
+    if (EnableOpt || !MBB.isLayoutSuccessor(I.getOperand(0).getMBB()))
+      return false;
+    I.eraseFromParent();
+    return true;
+  }
   case TargetOpcode::G_SHL:
     return earlySelectSHL(I, MRI);
   case TargetOpcode::G_CONSTANT: {

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
index 2c53f6df4d4f..f6aa16784b25 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-binop.mir
@@ -330,7 +330,6 @@ body:             |
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x80000000)
   ; CHECK:   [[COPY:%[0-9]+]]:gpr32sp = COPY $w0
-  ; CHECK:   B %bb.1
   ; CHECK: bb.1:
   ; CHECK:   [[ADDWri:%[0-9]+]]:gpr32sp = ADDWri [[COPY]], 1, 0
   ; CHECK:   $w0 = COPY [[ADDWri]]

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt-constrain.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt-constrain.mir
index 082bf43061da..6df6573b3533 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt-constrain.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-jump-table-brjt-constrain.mir
@@ -35,7 +35,6 @@ body:             |
   ; CHECK:   BR %6
   ; CHECK: bb.2:
   ; CHECK:   successors: %bb.3(0x80000000)
-  ; CHECK:   B %bb.3
   ; CHECK: bb.3:
   ; CHECK:   RET_ReallyLR
   bb.1:

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
index a309daab0b4c..f0ae4f17b2ee 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-returnaddress-liveins.mir
@@ -19,7 +19,6 @@ body:             |
   ; CHECK:   successors: %bb.1(0x80000000)
   ; CHECK:   liveins: $w0, $x0, $lr
   ; CHECK:   [[COPY:%[0-9]+]]:gpr64sp = COPY $lr
-  ; CHECK:   B %bb.1
   ; CHECK: bb.1:
   ; CHECK:   [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]]
   ; CHECK:   $x0 = COPY [[COPY1]]
@@ -47,7 +46,6 @@ body:             |
   ; CHECK:   successors: %bb.1(0x80000000)
   ; CHECK:   liveins: $w0, $x0, $lr
   ; CHECK:   [[COPY:%[0-9]+]]:gpr64sp = COPY $lr
-  ; CHECK:   B %bb.1
   ; CHECK: bb.1:
   ; CHECK:   [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]]
   ; CHECK:   $x0 = COPY [[COPY1]]
@@ -78,7 +76,6 @@ body:             |
   ; CHECK:   liveins: $w0, $x0, $lr
   ; CHECK:   [[COPY:%[0-9]+]]:gpr64sp = COPY $lr
   ; CHECK:   [[COPY1:%[0-9]+]]:gpr64 = COPY [[COPY]]
-  ; CHECK:   B %bb.1
   ; CHECK: bb.1:
   ; CHECK:   $x0 = COPY [[COPY1]]
   ; CHECK:   [[COPY2:%[0-9]+]]:gpr64 = COPY [[COPY]]

diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir b/llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir
index cc75386271c8..5b39ade02774 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/select-xor.mir
@@ -132,7 +132,6 @@ body:             |
   ; CHECK-LABEL: name: xor_constant_n1_s32_gpr_2bb
   ; CHECK: bb.0:
   ; CHECK:   successors: %bb.1(0x80000000)
-  ; CHECK:   B %bb.1
   ; CHECK: bb.1:
   ; CHECK:   [[COPY:%[0-9]+]]:gpr32 = COPY $w0
   ; CHECK:   [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[COPY]]

diff --git a/llvm/test/CodeGen/AArch64/unwind-preserved.ll b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
index cf2a8e9b4a36..68fec0825542 100644
--- a/llvm/test/CodeGen/AArch64/unwind-preserved.ll
+++ b/llvm/test/CodeGen/AArch64/unwind-preserved.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --extra_scrub
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -O0 -global-isel=0 -global-isel-abort=0 < %s | FileCheck %s
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -O0 -global-isel=1 -global-isel-abort=0 < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -O0 -global-isel=1 -global-isel-abort=0 < %s | FileCheck %s --check-prefix=GISEL
 
 ; Test that z0 is saved/restored, as the unwinder may only retain the low 64bits (d0).
 define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) personality i8 0 {
@@ -125,6 +125,128 @@ define <vscale x 4 x i32> @invoke_callee_may_throw_sve(<vscale x 4 x i32> %v) pe
 ; CHECK-NEXT:    addvl sp, sp, #18
 ; CHECK-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: invoke_callee_may_throw_sve:
+; GISEL:       .Lfunc_begin0:
+; GISEL-NEXT:    .cfi_startproc
+; GISEL-NEXT:  // %bb.0:
+; GISEL-NEXT:    stp x29, x30, [sp, #-16]! // 16-byte Folded Spill
+; GISEL-NEXT:    addvl sp, sp, #-18
+; GISEL-NEXT:    str p15, [sp, #4, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p14, [sp, #5, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p13, [sp, #6, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p12, [sp, #7, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p11, [sp, #8, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p10, [sp, #9, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p9, [sp, #10, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p8, [sp, #11, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p7, [sp, #12, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p6, [sp, #13, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p5, [sp, #14, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str p4, [sp, #15, mul vl] // 2-byte Folded Spill
+; GISEL-NEXT:    str z23, [sp, #2, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z22, [sp, #3, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z21, [sp, #4, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z20, [sp, #5, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z19, [sp, #6, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z18, [sp, #7, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z17, [sp, #8, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z16, [sp, #9, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z15, [sp, #10, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z14, [sp, #11, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z13, [sp, #12, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z12, [sp, #13, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z11, [sp, #14, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z10, [sp, #15, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z9, [sp, #16, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    str z8, [sp, #17, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    addvl sp, sp, #-2
+; GISEL-NEXT:    .cfi_escape 0x0f, 0x0d, 0x8f, 0x00, 0x11, 0x10, 0x22, 0x11, 0xa0, 0x01, 0x92, 0x2e, 0x00, 0x1e, 0x22 // sp + 16 + 160 * VG
+; GISEL-NEXT:    .cfi_escape 0x10, 0x48, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x78, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d8 @ cfa - 16 - 8 * VG
+; GISEL-NEXT:    .cfi_escape 0x10, 0x49, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x70, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d9 @ cfa - 16 - 16 * VG
+; GISEL-NEXT:    .cfi_escape 0x10, 0x4a, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x68, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d10 @ cfa - 16 - 24 * VG
+; GISEL-NEXT:    .cfi_escape 0x10, 0x4b, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x60, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d11 @ cfa - 16 - 32 * VG
+; GISEL-NEXT:    .cfi_escape 0x10, 0x4c, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x58, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d12 @ cfa - 16 - 40 * VG
+; GISEL-NEXT:    .cfi_escape 0x10, 0x4d, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x50, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d13 @ cfa - 16 - 48 * VG
+; GISEL-NEXT:    .cfi_escape 0x10, 0x4e, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x48, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d14 @ cfa - 16 - 56 * VG
+; GISEL-NEXT:    .cfi_escape 0x10, 0x4f, 0x0a, 0x11, 0x70, 0x22, 0x11, 0x40, 0x92, 0x2e, 0x00, 0x1e, 0x22 // $d15 @ cfa - 16 - 64 * VG
+; GISEL-NEXT:    .cfi_offset w30, -8
+; GISEL-NEXT:    .cfi_offset w29, -16
+; GISEL-NEXT:  .Ltmp0:
+; GISEL-NEXT:    str z0, [sp, #1, mul vl] // 16-byte Folded Spill
+; GISEL-NEXT:    bl may_throw_sve
+; GISEL-NEXT:  .Ltmp1:
+; GISEL-NEXT:    str z0, [sp] // 16-byte Folded Spill
+; GISEL-NEXT:    b .LBB0_1
+; GISEL-NEXT:  .LBB0_1: // %.Lcontinue
+; GISEL-NEXT:    ldr z0, [sp] // 16-byte Folded Reload
+; GISEL-NEXT:    addvl sp, sp, #2
+; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    addvl sp, sp, #18
+; GISEL-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; GISEL-NEXT:    ret
+; GISEL-NEXT:  .LBB0_2: // %.Lunwind
+; GISEL-NEXT:  .Ltmp2:
+; GISEL-NEXT:    ldr z0, [sp, #1, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    addvl sp, sp, #2
+; GISEL-NEXT:    ldr p15, [sp, #4, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p14, [sp, #5, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p13, [sp, #6, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p12, [sp, #7, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p11, [sp, #8, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p10, [sp, #9, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p9, [sp, #10, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p8, [sp, #11, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p7, [sp, #12, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p6, [sp, #13, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p5, [sp, #14, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr p4, [sp, #15, mul vl] // 2-byte Folded Reload
+; GISEL-NEXT:    ldr z23, [sp, #2, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z22, [sp, #3, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z21, [sp, #4, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z20, [sp, #5, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z19, [sp, #6, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z18, [sp, #7, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z17, [sp, #8, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z16, [sp, #9, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z15, [sp, #10, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z14, [sp, #11, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z13, [sp, #12, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z12, [sp, #13, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z11, [sp, #14, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z10, [sp, #15, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z9, [sp, #16, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    ldr z8, [sp, #17, mul vl] // 16-byte Folded Reload
+; GISEL-NEXT:    addvl sp, sp, #18
+; GISEL-NEXT:    ldp x29, x30, [sp], #16 // 16-byte Folded Reload
+; GISEL-NEXT:    ret
   %result = invoke <vscale x 4 x i32> @may_throw_sve(<vscale x 4 x i32> %v) to label %.Lcontinue unwind label %.Lunwind
 .Lcontinue:
   ret <vscale x 4 x i32> %result
@@ -204,6 +326,72 @@ define aarch64_vector_pcs <4 x i32> @invoke_callee_may_throw_neon(<4 x i32> %v)
 ; CHECK-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
 ; CHECK-NEXT:    add sp, sp, #304 // =304
 ; CHECK-NEXT:    ret
+;
+; GISEL-LABEL: invoke_callee_may_throw_neon:
+; GISEL:       .Lfunc_begin1:
+; GISEL-NEXT:    .cfi_startproc
+; GISEL-NEXT:  // %bb.0:
+; GISEL-NEXT:    sub sp, sp, #304 // =304
+; GISEL-NEXT:    stp q23, q22, [sp, #32] // 32-byte Folded Spill
+; GISEL-NEXT:    stp q21, q20, [sp, #64] // 32-byte Folded Spill
+; GISEL-NEXT:    stp q19, q18, [sp, #96] // 32-byte Folded Spill
+; GISEL-NEXT:    stp q17, q16, [sp, #128] // 32-byte Folded Spill
+; GISEL-NEXT:    stp q15, q14, [sp, #160] // 32-byte Folded Spill
+; GISEL-NEXT:    stp q13, q12, [sp, #192] // 32-byte Folded Spill
+; GISEL-NEXT:    stp q11, q10, [sp, #224] // 32-byte Folded Spill
+; GISEL-NEXT:    stp q9, q8, [sp, #256] // 32-byte Folded Spill
+; GISEL-NEXT:    stp x29, x30, [sp, #288] // 16-byte Folded Spill
+; GISEL-NEXT:    .cfi_def_cfa_offset 304
+; GISEL-NEXT:    .cfi_offset w30, -8
+; GISEL-NEXT:    .cfi_offset w29, -16
+; GISEL-NEXT:    .cfi_offset b8, -32
+; GISEL-NEXT:    .cfi_offset b9, -48
+; GISEL-NEXT:    .cfi_offset b10, -64
+; GISEL-NEXT:    .cfi_offset b11, -80
+; GISEL-NEXT:    .cfi_offset b12, -96
+; GISEL-NEXT:    .cfi_offset b13, -112
+; GISEL-NEXT:    .cfi_offset b14, -128
+; GISEL-NEXT:    .cfi_offset b15, -144
+; GISEL-NEXT:    .cfi_offset b16, -160
+; GISEL-NEXT:    .cfi_offset b17, -176
+; GISEL-NEXT:    .cfi_offset b18, -192
+; GISEL-NEXT:    .cfi_offset b19, -208
+; GISEL-NEXT:    .cfi_offset b20, -224
+; GISEL-NEXT:    .cfi_offset b21, -240
+; GISEL-NEXT:    .cfi_offset b22, -256
+; GISEL-NEXT:    .cfi_offset b23, -272
+; GISEL-NEXT:  .Ltmp3:
+; GISEL-NEXT:    str q0, [sp, #16] // 16-byte Folded Spill
+; GISEL-NEXT:    bl may_throw_neon
+; GISEL-NEXT:  .Ltmp4:
+; GISEL-NEXT:    str q0, [sp] // 16-byte Folded Spill
+; GISEL-NEXT:  // %bb.1: // %.Lcontinue
+; GISEL-NEXT:    ldr q0, [sp] // 16-byte Folded Reload
+; GISEL-NEXT:    ldp x29, x30, [sp, #288] // 16-byte Folded Reload
+; GISEL-NEXT:    ldp q9, q8, [sp, #256] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q11, q10, [sp, #224] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q13, q12, [sp, #192] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q15, q14, [sp, #160] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q17, q16, [sp, #128] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q19, q18, [sp, #96] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
+; GISEL-NEXT:    add sp, sp, #304 // =304
+; GISEL-NEXT:    ret
+; GISEL-NEXT:  .LBB1_2: // %.Lunwind
+; GISEL-NEXT:  .Ltmp5:
+; GISEL-NEXT:    ldr q0, [sp, #16] // 16-byte Folded Reload
+; GISEL-NEXT:    ldp x29, x30, [sp, #288] // 16-byte Folded Reload
+; GISEL-NEXT:    ldp q9, q8, [sp, #256] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q11, q10, [sp, #224] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q13, q12, [sp, #192] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q15, q14, [sp, #160] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q17, q16, [sp, #128] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q19, q18, [sp, #96] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q21, q20, [sp, #64] // 32-byte Folded Reload
+; GISEL-NEXT:    ldp q23, q22, [sp, #32] // 32-byte Folded Reload
+; GISEL-NEXT:    add sp, sp, #304 // =304
+; GISEL-NEXT:    ret
   %result = invoke aarch64_vector_pcs <4 x i32> @may_throw_neon(<4 x i32> %v) to label %.Lcontinue unwind label %.Lunwind
 .Lcontinue:
   ret <4 x i32> %result


        

