[llvm] d337c1f - [AArch64] Use SUBXrx64 for dynamic stack to refer to sp
via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 19 20:46:17 PDT 2022
Author: chenglin.bi
Date: 2022-07-20T11:46:10+08:00
New Revision: d337c1f2565a748bcf3f162d0754a362b2c597a4
URL: https://github.com/llvm/llvm-project/commit/d337c1f2565a748bcf3f162d0754a362b2c597a4
DIFF: https://github.com/llvm/llvm-project/commit/d337c1f2565a748bcf3f162d0754a362b2c597a4.diff
LOG: [AArch64] Use SUBXrx64 for dynamic stack to refer to sp
When we lower a dynamic stack allocation, we need to subtract the pattern `x15 << 4` from sp.
The subtract instruction with an arithmetic shifted register (SUBXrs) can't refer to sp, so for now we need two extra mov instructions like:
```
mov x0, sp
sub x0, x0, x15, lsl #4
mov sp, x0
```
If we want to refer to sp in the subtract instruction like this:
```
sub sp, sp, x15, lsl #4
```
we must use the arithmetic extended register version (SUBXrx).
So in this patch, when we find a sub that has an sp operand as src0, we try to select it to SUBXrx64.
Reviewed By: efriedma
Differential Revision: https://reviews.llvm.org/D129932
Added:
Modified:
llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
llvm/lib/Target/AArch64/AArch64InstrFormats.td
llvm/lib/Target/AArch64/AArch64InstrInfo.td
llvm/test/CodeGen/AArch64/win-alloca.ll
llvm/test/CodeGen/AArch64/win64_vararg.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
index 82fe5772c99de..5fbb8e5f81dfe 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
@@ -69,6 +69,7 @@ class AArch64DAGToDAGISel : public SelectionDAGISel {
bool tryMLAV64LaneV128(SDNode *N);
bool tryMULLV64LaneV128(unsigned IntNo, SDNode *N);
bool SelectArithExtendedRegister(SDValue N, SDValue &Reg, SDValue &Shift);
+ bool SelectArithUXTXRegister(SDValue N, SDValue &Reg, SDValue &Shift);
bool SelectArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
bool SelectNegArithImmed(SDValue N, SDValue &Val, SDValue &Shift);
bool SelectArithShiftedRegister(SDValue N, SDValue &Reg, SDValue &Shift) {
@@ -893,6 +894,30 @@ bool AArch64DAGToDAGISel::SelectArithExtendedRegister(SDValue N, SDValue &Reg,
return isWorthFolding(N);
}
+/// SelectArithUXTXRegister - Select a "UXTX register" operand. This
+/// operand is referred to by instructions that have an SP operand.
+bool AArch64DAGToDAGISel::SelectArithUXTXRegister(SDValue N, SDValue &Reg,
+ SDValue &Shift) {
+ unsigned ShiftVal = 0;
+ AArch64_AM::ShiftExtendType Ext;
+
+ if (N.getOpcode() != ISD::SHL)
+ return false;
+
+ ConstantSDNode *CSD = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ if (!CSD)
+ return false;
+ ShiftVal = CSD->getZExtValue();
+ if (ShiftVal > 4)
+ return false;
+
+ Ext = AArch64_AM::UXTX;
+ Reg = N.getOperand(0);
+ Shift = CurDAG->getTargetConstant(getArithExtendImm(Ext, ShiftVal), SDLoc(N),
+ MVT::i32);
+ return isWorthFolding(N);
+}
+
/// If there's a use of this ADDlow that's not itself a load/store then we'll
/// need to create a real ADD instruction from it anyway and there's no point in
/// folding it into the mem op. Theoretically, it shouldn't matter, but there's
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 7df9831d598fa..e70d304f37b92 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -1168,6 +1168,8 @@ def gi_arith_extended_reg32to64_i64 :
GIComplexOperandMatcher<s64, "selectArithExtendedRegister">,
GIComplexPatternEquiv<arith_extended_reg32to64_i64>;
+def arith_uxtx : ComplexPattern<i64, 2, "SelectArithUXTXRegister", []>;
+
// Floating-point immediate.
def fpimm16XForm : SDNodeXForm<fpimm, [{
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index d444223e4494f..13428b30c52dc 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -1691,6 +1691,11 @@ def : InstAlias<"mov $dst, $src",
defm ADDS : AddSubS<0, "adds", AArch64add_flag, "cmn", "subs", "cmp">;
defm SUBS : AddSubS<1, "subs", AArch64sub_flag, "cmp", "adds", "cmn">;
+def copyFromSP: PatLeaf<(i64 GPR64:$src), [{
+ return N->getOpcode() == ISD::CopyFromReg &&
+ cast<RegisterSDNode>(N->getOperand(1))->getReg() == AArch64::SP;
+}]>;
+
// Use SUBS instead of SUB to enable CSE between SUBS and SUB.
def : Pat<(sub GPR32sp:$Rn, addsub_shifted_imm32:$imm),
(SUBSWri GPR32sp:$Rn, addsub_shifted_imm32:$imm)>;
@@ -1709,6 +1714,8 @@ def : Pat<(sub GPR32sp:$R2, arith_extended_reg32_i32:$R3),
(SUBSWrx GPR32sp:$R2, arith_extended_reg32_i32:$R3)>;
def : Pat<(sub GPR64sp:$R2, arith_extended_reg32to64_i64:$R3),
(SUBSXrx GPR64sp:$R2, arith_extended_reg32to64_i64:$R3)>;
+def : Pat<(sub copyFromSP:$R2, (arith_uxtx GPR64:$R3, arith_extendlsl64:$imm)),
+ (SUBXrx64 GPR64sp:$R2, GPR64:$R3, arith_extendlsl64:$imm)>;
}
// Because of the immediate format for add/sub-imm instructions, the
diff --git a/llvm/test/CodeGen/AArch64/win-alloca.ll b/llvm/test/CodeGen/AArch64/win-alloca.ll
index f26b33e24a746..9c34cabbb5471 100644
--- a/llvm/test/CodeGen/AArch64/win-alloca.ll
+++ b/llvm/test/CodeGen/AArch64/win-alloca.ll
@@ -17,7 +17,6 @@ declare void @func2(i8*)
; CHECK: add [[REG1:x[0-9]+]], x0, #15
; CHECK-OPT: lsr x15, [[REG1]], #4
; CHECK: bl __chkstk
-; CHECK: mov [[REG2:x[0-9]+]], sp
-; CHECK-OPT: sub [[REG3:x[0-9]+]], [[REG2]], x15, lsl #4
+; CHECK-OPT: sub [[REG3:x[0-9]+]], sp, x15, lsl #4
; CHECK-OPT: mov sp, [[REG3]]
; CHECK: bl func2
diff --git a/llvm/test/CodeGen/AArch64/win64_vararg.ll b/llvm/test/CodeGen/AArch64/win64_vararg.ll
index 28ad0082ad543..954b2f54499d9 100644
--- a/llvm/test/CodeGen/AArch64/win64_vararg.ll
+++ b/llvm/test/CodeGen/AArch64/win64_vararg.ll
@@ -197,8 +197,7 @@ define void @vla(i32, i8*, ...) local_unnamed_addr {
; CHECK-NEXT: stp x5, x6, [x29, #48]
; CHECK-NEXT: str x7, [x29, #64]
; CHECK-NEXT: bl __chkstk
-; CHECK-NEXT: mov x8, sp
-; CHECK-NEXT: sub x20, x8, x15, lsl #4
+; CHECK-NEXT: sub x20, sp, x15, lsl #4
; CHECK-NEXT: mov sp, x20
; CHECK-NEXT: ldr x21, [x29, #16]
; CHECK-NEXT: sxtw x22, w0
More information about the llvm-commits
mailing list