[llvm] 0847cc0 - [NFC][AArch64] Use 'i' to encode the offset form of load/store.
Hsiangkai Wang via llvm-commits
llvm-commits at lists.llvm.org
Mon Mar 6 04:35:10 PST 2023
Author: Hsiangkai Wang
Date: 2023-03-06T12:34:19Z
New Revision: 0847cc06a6c4feae5414f062141fbbd9b9aea4e0
URL: https://github.com/llvm/llvm-project/commit/0847cc06a6c4feae5414f062141fbbd9b9aea4e0
DIFF: https://github.com/llvm/llvm-project/commit/0847cc06a6c4feae5414f062141fbbd9b9aea4e0.diff
LOG: [NFC][AArch64] Use 'i' to encode the offset form of load/store.
STG, STZG, ST2G, and STZ2G are the only load/store instructions whose
immediate-offset form is named with an 'Offset' suffix. All other
load/store instructions use the 'i' suffix for that form. Since there is
no special reason for the exception, make the naming consistent.
Differential Revision: https://reviews.llvm.org/D141819
Added:
Modified:
llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
llvm/lib/Target/AArch64/AArch64InstrFormats.td
llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
llvm/lib/Target/AArch64/AArch64InstrInfo.td
llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
llvm/test/CodeGen/AArch64/ldst-opt-mte-with-dbg.mir
llvm/test/CodeGen/AArch64/ldst-opt-mte.mir
llvm/test/CodeGen/AArch64/settag-merge.mir
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
index 603dccdc82487..33b586e2c1b40 100644
--- a/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64FrameLowering.cpp
@@ -954,10 +954,10 @@ bool AArch64FrameLowering::shouldCombineCSRLocalStackBumpInEpilogue(
switch (LastI->getOpcode()) {
case AArch64::STGloop:
case AArch64::STZGloop:
- case AArch64::STGOffset:
- case AArch64::STZGOffset:
- case AArch64::ST2GOffset:
- case AArch64::STZ2GOffset:
+ case AArch64::STGi:
+ case AArch64::STZGi:
+ case AArch64::ST2Gi:
+ case AArch64::STZ2Gi:
return false;
default:
return true;
@@ -3455,8 +3455,8 @@ void TagStoreEdit::emitUnrolled(MachineBasicBlock::iterator InsertI) {
int64_t InstrSize = (Size > 16) ? 32 : 16;
unsigned Opcode =
InstrSize == 16
- ? (ZeroData ? AArch64::STZGOffset : AArch64::STGOffset)
- : (ZeroData ? AArch64::STZ2GOffset : AArch64::ST2GOffset);
+ ? (ZeroData ? AArch64::STZGi : AArch64::STGi)
+ : (ZeroData ? AArch64::STZ2Gi : AArch64::ST2Gi);
assert(BaseRegOffsetBytes % 16 == 0);
MachineInstr *I = BuildMI(*MBB, InsertI, DL, TII->get(Opcode))
.addReg(AArch64::SP)
@@ -3638,8 +3638,8 @@ bool isMergeableStackTaggingInstruction(MachineInstr &MI, int64_t &Offset,
const MachineFrameInfo &MFI = MF.getFrameInfo();
unsigned Opcode = MI.getOpcode();
- ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGOffset ||
- Opcode == AArch64::STZ2GOffset);
+ ZeroData = (Opcode == AArch64::STZGloop || Opcode == AArch64::STZGi ||
+ Opcode == AArch64::STZ2Gi);
if (Opcode == AArch64::STGloop || Opcode == AArch64::STZGloop) {
if (!MI.getOperand(0).isDead() || !MI.getOperand(1).isDead())
@@ -3651,9 +3651,9 @@ bool isMergeableStackTaggingInstruction(MachineInstr &MI, int64_t &Offset,
return true;
}
- if (Opcode == AArch64::STGOffset || Opcode == AArch64::STZGOffset)
+ if (Opcode == AArch64::STGi || Opcode == AArch64::STZGi)
Size = 16;
- else if (Opcode == AArch64::ST2GOffset || Opcode == AArch64::STZ2GOffset)
+ else if (Opcode == AArch64::ST2Gi || Opcode == AArch64::STZ2Gi)
Size = 32;
else
return false;
@@ -3903,10 +3903,10 @@ void AArch64FrameLowering::orderFrameObjects(
case AArch64::STZGloop:
OpIndex = 3;
break;
- case AArch64::STGOffset:
- case AArch64::STZGOffset:
- case AArch64::ST2GOffset:
- case AArch64::STZ2GOffset:
+ case AArch64::STGi:
+ case AArch64::STZGi:
+ case AArch64::ST2Gi:
+ case AArch64::STZ2Gi:
OpIndex = 1;
break;
default:
diff --git a/llvm/lib/Target/AArch64/AArch64InstrFormats.td b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
index 0d22a2c623641..0cd03dbf47718 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrFormats.td
@@ -4673,7 +4673,7 @@ class BaseMemTagStore<bits<2> opc1, bits<2> opc2, string asm_insn,
}
multiclass MemTagStore<bits<2> opc1, string insn> {
- def Offset :
+ def i :
BaseMemTagStore<opc1, 0b10, insn, "\t$Rt, [$Rn, $offset]", "",
(outs), (ins GPR64sp:$Rt, GPR64sp:$Rn, simm9s16:$offset)>;
def PreIndex :
@@ -4688,7 +4688,7 @@ multiclass MemTagStore<bits<2> opc1, string insn> {
(ins GPR64sp:$Rt, GPR64sp:$Rn, simm9s16:$offset)>;
def : InstAlias<insn # "\t$Rt, [$Rn]",
- (!cast<Instruction>(NAME # "Offset") GPR64sp:$Rt, GPR64sp:$Rn, 0)>;
+ (!cast<Instruction>(NAME # "i") GPR64sp:$Rt, GPR64sp:$Rn, 0)>;
}
//---
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 6a1617910b5f9..54ad05bc4698b 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -2369,7 +2369,7 @@ unsigned AArch64InstrInfo::getLoadStoreImmIdx(unsigned Opc) {
case AArch64::LDNF1D_IMM:
return 3;
case AArch64::ADDG:
- case AArch64::STGOffset:
+ case AArch64::STGi:
case AArch64::LDR_PXI:
case AArch64::STR_PXI:
return 2;
@@ -2874,8 +2874,8 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
MaxOffset = 63;
break;
case AArch64::LDG:
- case AArch64::STGOffset:
- case AArch64::STZGOffset:
+ case AArch64::STGi:
+ case AArch64::STZGi:
Scale = TypeSize::Fixed(16);
Width = 16;
MinOffset = -256;
@@ -3033,8 +3033,8 @@ bool AArch64InstrInfo::getMemOpInfo(unsigned Opcode, TypeSize &Scale,
MinOffset = -8;
MaxOffset = 7;
break;
- case AArch64::ST2GOffset:
- case AArch64::STZ2GOffset:
+ case AArch64::ST2Gi:
+ case AArch64::STZ2Gi:
Scale = TypeSize::Fixed(16);
Width = 32;
MinOffset = -256;
@@ -3151,10 +3151,10 @@ int AArch64InstrInfo::getMemScale(unsigned Opc) {
case AArch64::LDPQi:
case AArch64::LDRQpre:
case AArch64::STPQi:
- case AArch64::STGOffset:
- case AArch64::STZGOffset:
- case AArch64::ST2GOffset:
- case AArch64::STZ2GOffset:
+ case AArch64::STGi:
+ case AArch64::STZGi:
+ case AArch64::ST2Gi:
+ case AArch64::STZ2Gi:
case AArch64::STGPi:
return 16;
}
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 2ab27f5225ea2..c2ab263551b8a 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -2128,20 +2128,20 @@ defm ST2G : MemTagStore<0b10, "st2g">;
defm STZ2G : MemTagStore<0b11, "stz2g">;
def : Pat<(AArch64stg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
- (STGOffset $Rn, $Rm, $imm)>;
+ (STGi $Rn, $Rm, $imm)>;
def : Pat<(AArch64stzg GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
- (STZGOffset $Rn, $Rm, $imm)>;
+ (STZGi $Rn, $Rm, $imm)>;
def : Pat<(AArch64st2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
- (ST2GOffset $Rn, $Rm, $imm)>;
+ (ST2Gi $Rn, $Rm, $imm)>;
def : Pat<(AArch64stz2g GPR64sp:$Rn, (am_indexeds9s128 GPR64sp:$Rm, simm9s16:$imm)),
- (STZ2GOffset $Rn, $Rm, $imm)>;
+ (STZ2Gi $Rn, $Rm, $imm)>;
defm STGP : StorePairOffset <0b01, 0, GPR64z, simm7s16, "stgp">;
def STGPpre : StorePairPreIdx <0b01, 0, GPR64z, simm7s16, "stgp">;
def STGPpost : StorePairPostIdx<0b01, 0, GPR64z, simm7s16, "stgp">;
def : Pat<(int_aarch64_stg GPR64:$Rt, (am_indexeds9s128 GPR64sp:$Rn, simm9s16:$offset)),
- (STGOffset GPR64:$Rt, GPR64sp:$Rn, simm9s16:$offset)>;
+ (STGi GPR64:$Rt, GPR64sp:$Rn, simm9s16:$offset)>;
def : Pat<(int_aarch64_stgp (am_indexed7s128 GPR64sp:$Rn, simm7s16:$imm), GPR64:$Rt, GPR64:$Rt2),
(STGPi $Rt, $Rt2, $Rn, $imm)>;
diff --git a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index afea2b2f5f754..4c13c095ff69f 100644
--- a/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/llvm/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -237,10 +237,10 @@ static bool isTagStore(const MachineInstr &MI) {
switch (MI.getOpcode()) {
default:
return false;
- case AArch64::STGOffset:
- case AArch64::STZGOffset:
- case AArch64::ST2GOffset:
- case AArch64::STZ2GOffset:
+ case AArch64::STGi:
+ case AArch64::STZGi:
+ case AArch64::ST2Gi:
+ case AArch64::STZ2Gi:
return true;
}
}
@@ -465,13 +465,13 @@ static unsigned getPreIndexedOpcode(unsigned Opc) {
return AArch64::STPWpre;
case AArch64::STPXi:
return AArch64::STPXpre;
- case AArch64::STGOffset:
+ case AArch64::STGi:
return AArch64::STGPreIndex;
- case AArch64::STZGOffset:
+ case AArch64::STZGi:
return AArch64::STZGPreIndex;
- case AArch64::ST2GOffset:
+ case AArch64::ST2Gi:
return AArch64::ST2GPreIndex;
- case AArch64::STZ2GOffset:
+ case AArch64::STZ2Gi:
return AArch64::STZ2GPreIndex;
case AArch64::STGPi:
return AArch64::STGPpre;
@@ -544,13 +544,13 @@ static unsigned getPostIndexedOpcode(unsigned Opc) {
return AArch64::STPWpost;
case AArch64::STPXi:
return AArch64::STPXpost;
- case AArch64::STGOffset:
+ case AArch64::STGi:
return AArch64::STGPostIndex;
- case AArch64::STZGOffset:
+ case AArch64::STZGi:
return AArch64::STZGPostIndex;
- case AArch64::ST2GOffset:
+ case AArch64::ST2Gi:
return AArch64::ST2GPostIndex;
- case AArch64::STZ2GOffset:
+ case AArch64::STZ2Gi:
return AArch64::STZ2GPostIndex;
case AArch64::STGPi:
return AArch64::STGPpost;
@@ -681,10 +681,10 @@ static bool isMergeableLdStUpdate(MachineInstr &MI) {
case AArch64::LDRWui:
case AArch64::LDRHHui:
case AArch64::LDRBBui:
- case AArch64::STGOffset:
- case AArch64::STZGOffset:
- case AArch64::ST2GOffset:
- case AArch64::STZ2GOffset:
+ case AArch64::STGi:
+ case AArch64::STZGi:
+ case AArch64::ST2Gi:
+ case AArch64::STZ2Gi:
case AArch64::STGPi:
// Unscaled instructions.
case AArch64::STURSi:
diff --git a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
index d9d5c2be03092..a6bf778cbc668 100644
--- a/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
+++ b/llvm/lib/Target/AArch64/AArch64SchedNeoverseN2.td
@@ -758,8 +758,8 @@ def : InstRW<[N2Write_1cyc_1L01_1D_1I], (instrs STGPreIndex, STGPostIndex,
// Store allocation tag to two granules, zeroing, signed offset
// Store allocation tag and reg pair to memory, signed offset
// Store multiple allocation tags
-def : InstRW<[N2Write_1cyc_1L01_1D], (instrs STGOffset, ST2GOffset, STZGOffset,
- STZ2GOffset, STGPi, STGM, STZGM)>;
+def : InstRW<[N2Write_1cyc_1L01_1D], (instrs STGi, ST2Gi, STZGi,
+ STZ2Gi, STGPi, STGM, STZGM)>;
// FP data processing instructions
// -----------------------------------------------------------------------------
diff --git a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
index 2cbac9783bbd6..41cd405c891ee 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTaggingPreRA.cpp
@@ -276,8 +276,8 @@ std::optional<int> AArch64StackTaggingPreRA::findFirstSlotCandidate() {
Register UseReg = WorkList.pop_back_val();
for (auto &UseI : MRI->use_instructions(UseReg)) {
unsigned Opcode = UseI.getOpcode();
- if (Opcode == AArch64::STGOffset || Opcode == AArch64::ST2GOffset ||
- Opcode == AArch64::STZGOffset || Opcode == AArch64::STZ2GOffset ||
+ if (Opcode == AArch64::STGi || Opcode == AArch64::ST2Gi ||
+ Opcode == AArch64::STZGi || Opcode == AArch64::STZ2Gi ||
Opcode == AArch64::STGPi || Opcode == AArch64::STGloop ||
Opcode == AArch64::STZGloop || Opcode == AArch64::STGloop_wback ||
Opcode == AArch64::STZGloop_wback)
diff --git a/llvm/test/CodeGen/AArch64/ldst-opt-mte-with-dbg.mir b/llvm/test/CodeGen/AArch64/ldst-opt-mte-with-dbg.mir
index 66a8a8e53928d..66b342432c2cd 100644
--- a/llvm/test/CodeGen/AArch64/ldst-opt-mte-with-dbg.mir
+++ b/llvm/test/CodeGen/AArch64/ldst-opt-mte-with-dbg.mir
@@ -14,7 +14,7 @@ body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = ADDXri $x0, 112, 0
@@ -30,7 +30,7 @@ body: |
bb.0.entry:
liveins: $x0, $x1
- STGOffset $x1, $x0, 0
+ STGi $x1, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = ADDXri $x0, 112, 0
@@ -40,14 +40,14 @@ body: |
...
# CHECK-LABEL: name: test_STG_post_unaligned
-# CHECK: STGOffset $x0, $x0, 0
+# CHECK: STGi $x0, $x0, 0
# CHECK-NEXT: ADDXri $x0, 8, 0
name: test_STG_post_unaligned
body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = ADDXri $x0, 8, 0
@@ -63,7 +63,7 @@ body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = SUBXri $x0, 4096, 0
@@ -73,14 +73,14 @@ body: |
...
# CHECK-LABEL: name: test_STG_post3
-# CHECK: STGOffset $x0, $x0, 0
+# CHECK: STGi $x0, $x0, 0
# CHECK-NEXT: SUBXri $x0, 4112, 0
name: test_STG_post3
body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = SUBXri $x0, 4112, 0
@@ -96,7 +96,7 @@ body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = ADDXri $x0, 4080, 0
@@ -106,14 +106,14 @@ body: |
...
# CHECK-LABEL: name: test_STG_post5
-# CHECK: STGOffset $x0, $x0, 0
+# CHECK: STGi $x0, $x0, 0
# CHECK-NEXT: ADDXri $x0, 4096, 0
name: test_STG_post5
body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = ADDXri $x0, 4096, 0
@@ -131,7 +131,7 @@ body: |
bb.0.entry:
liveins: $x0
- STZGOffset $x0, $x0, 0
+ STZGi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = ADDXri $x0, 112, 0
@@ -147,7 +147,7 @@ body: |
bb.0.entry:
liveins: $x0
- ST2GOffset $x0, $x0, 0
+ ST2Gi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = ADDXri $x0, 112, 0
@@ -163,7 +163,7 @@ body: |
bb.0.entry:
liveins: $x0
- STZ2GOffset $x0, $x0, 0
+ STZ2Gi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = ADDXri $x0, 112, 0
@@ -265,7 +265,7 @@ body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 10
+ STGi $x0, $x0, 10
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
$x0 = ADDXri $x0, 160, 0
@@ -302,7 +302,7 @@ body: |
$x0 = ADDXri $x0, 32, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
DBG_VALUE $x0, 0
DBG_VALUE $x0, 0
RET_ReallyLR implicit $x0
diff --git a/llvm/test/CodeGen/AArch64/ldst-opt-mte.mir b/llvm/test/CodeGen/AArch64/ldst-opt-mte.mir
index bc24224d897c1..382f79c048a50 100644
--- a/llvm/test/CodeGen/AArch64/ldst-opt-mte.mir
+++ b/llvm/test/CodeGen/AArch64/ldst-opt-mte.mir
@@ -11,7 +11,7 @@ body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
$x0 = ADDXri $x0, 112, 0
RET_ReallyLR implicit $x0
...
@@ -23,20 +23,20 @@ body: |
bb.0.entry:
liveins: $x0, $x1
- STGOffset $x1, $x0, 0
+ STGi $x1, $x0, 0
$x0 = ADDXri $x0, 112, 0
RET_ReallyLR implicit $x0
...
# CHECK-LABEL: name: test_STG_post_unaligned
-# CHECK: STGOffset $x0, $x0, 0
+# CHECK: STGi $x0, $x0, 0
# CHECK-NEXT: ADDXri $x0, 8, 0
name: test_STG_post_unaligned
body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
$x0 = ADDXri $x0, 8, 0
RET_ReallyLR implicit $x0
...
@@ -48,20 +48,20 @@ body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
$x0 = SUBXri $x0, 4096, 0
RET_ReallyLR implicit $x0
...
# CHECK-LABEL: name: test_STG_post3
-# CHECK: STGOffset $x0, $x0, 0
+# CHECK: STGi $x0, $x0, 0
# CHECK-NEXT: SUBXri $x0, 4112, 0
name: test_STG_post3
body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
$x0 = SUBXri $x0, 4112, 0
RET_ReallyLR implicit $x0
...
@@ -73,20 +73,20 @@ body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
$x0 = ADDXri $x0, 4080, 0
RET_ReallyLR implicit $x0
...
# CHECK-LABEL: name: test_STG_post5
-# CHECK: STGOffset $x0, $x0, 0
+# CHECK: STGi $x0, $x0, 0
# CHECK-NEXT: ADDXri $x0, 4096, 0
name: test_STG_post5
body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
$x0 = ADDXri $x0, 4096, 0
RET_ReallyLR implicit $x0
...
@@ -100,7 +100,7 @@ body: |
bb.0.entry:
liveins: $x0
- STZGOffset $x0, $x0, 0
+ STZGi $x0, $x0, 0
$x0 = ADDXri $x0, 112, 0
RET_ReallyLR implicit $x0
...
@@ -112,7 +112,7 @@ body: |
bb.0.entry:
liveins: $x0
- ST2GOffset $x0, $x0, 0
+ ST2Gi $x0, $x0, 0
$x0 = ADDXri $x0, 112, 0
RET_ReallyLR implicit $x0
...
@@ -124,7 +124,7 @@ body: |
bb.0.entry:
liveins: $x0
- STZ2GOffset $x0, $x0, 0
+ STZ2Gi $x0, $x0, 0
$x0 = ADDXri $x0, 112, 0
RET_ReallyLR implicit $x0
...
@@ -202,7 +202,7 @@ body: |
bb.0.entry:
liveins: $x0
- STGOffset $x0, $x0, 10
+ STGi $x0, $x0, 10
$x0 = ADDXri $x0, 160, 0
RET_ReallyLR implicit $x0
...
@@ -229,7 +229,7 @@ body: |
liveins: $x0
$x0 = ADDXri $x0, 32, 0
- STGOffset $x0, $x0, 0
+ STGi $x0, $x0, 0
RET_ReallyLR implicit $x0
...
diff --git a/llvm/test/CodeGen/AArch64/settag-merge.mir b/llvm/test/CodeGen/AArch64/settag-merge.mir
index 991e0a23e319d..03524e780df02 100644
--- a/llvm/test/CodeGen/AArch64/settag-merge.mir
+++ b/llvm/test/CodeGen/AArch64/settag-merge.mir
@@ -30,8 +30,8 @@
# A sequence of STG with a register copy in the middle.
# Can be merged into ST2G + ST2G.
# CHECK-LABEL: name:{{.*}}stg16_16_16_16_ret
-# CHECK-DAG: ST2GOffset $sp, $sp, 2
-# CHECK-DAG: ST2GOffset $sp, $sp, 0
+# CHECK-DAG: ST2Gi $sp, $sp, 2
+# CHECK-DAG: ST2Gi $sp, $sp, 0
# CHECK-DAG: $w0 = COPY $wzr
# CHECK-DAG: RET_ReallyLR implicit killed $w0
@@ -44,11 +44,11 @@ stack:
- { id: 3, name: d, size: 16, alignment: 16 }
body: |
bb.0.entry:
- STGOffset $sp, %stack.0.a, 0 :: (store (s128) into %ir.a)
- STGOffset $sp, %stack.1.b, 0 :: (store (s128) into %ir.b)
- STGOffset $sp, %stack.2.c, 0 :: (store (s128) into %ir.c)
+ STGi $sp, %stack.0.a, 0 :: (store (s128) into %ir.a)
+ STGi $sp, %stack.1.b, 0 :: (store (s128) into %ir.b)
+ STGi $sp, %stack.2.c, 0 :: (store (s128) into %ir.c)
$w0 = COPY $wzr
- STGOffset $sp, %stack.3.d, 0 :: (store (s128) into %ir.d)
+ STGi $sp, %stack.3.d, 0 :: (store (s128) into %ir.d)
RET_ReallyLR implicit killed $w0
...
@@ -56,12 +56,12 @@ body: |
---
# A store in the middle prevents merging.
# CHECK-LABEL: name:{{.*}}stg16_store_128
-# CHECK: ST2GOffset $sp, $sp, 2
-# CHECK: ST2GOffset $sp, $sp, 4
-# CHECK: ST2GOffset $sp, $sp, 6
-# CHECK: STGOffset $sp, $sp, 8
+# CHECK: ST2Gi $sp, $sp, 2
+# CHECK: ST2Gi $sp, $sp, 4
+# CHECK: ST2Gi $sp, $sp, 6
+# CHECK: STGi $sp, $sp, 8
# CHECK: STRBBui
-# CHECK: ST2GOffset $sp, $sp, 0
+# CHECK: ST2Gi $sp, $sp, 0
# CHECK: RET_ReallyLR
name: stg16_store_128
@@ -71,13 +71,13 @@ stack:
- { id: 1, name: b, size: 128, alignment: 16 }
body: |
bb.0.entry:
- STGOffset $sp, %stack.0.a, 0 :: (store (s128) into %ir.a)
+ STGi $sp, %stack.0.a, 0 :: (store (s128) into %ir.a)
renamable $w8 = MOVi32imm 42
- ST2GOffset $sp, %stack.1.b, 6 :: (store (s256) into %ir.b + 96, align 16)
- ST2GOffset $sp, %stack.1.b, 4 :: (store (s256) into %ir.b + 64, align 16)
- ST2GOffset $sp, %stack.1.b, 2 :: (store (s256) into %ir.b + 32, align 16)
+ ST2Gi $sp, %stack.1.b, 6 :: (store (s256) into %ir.b + 96, align 16)
+ ST2Gi $sp, %stack.1.b, 4 :: (store (s256) into %ir.b + 64, align 16)
+ ST2Gi $sp, %stack.1.b, 2 :: (store (s256) into %ir.b + 32, align 16)
STRBBui killed renamable $w8, %stack.0.a, 0 :: (store (s8) into %ir.a, align 16)
- ST2GOffset $sp, %stack.1.b, 0 :: (store (s256) into %ir.b, align 16)
+ ST2Gi $sp, %stack.1.b, 0 :: (store (s256) into %ir.b, align 16)
RET_ReallyLR
...
More information about the llvm-commits
mailing list