[llvm] 0e826f0 - Refactored, added MIR test.
Vladislav Dzhidzhoev via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 5 07:07:23 PDT 2023
Author: Vladislav Dzhidzhoev
Date: 2023-09-05T16:00:48+02:00
New Revision: 0e826f0e6dbedc09db29aa62f3fd34db8fee328f
URL: https://github.com/llvm/llvm-project/commit/0e826f0e6dbedc09db29aa62f3fd34db8fee328f
DIFF: https://github.com/llvm/llvm-project/commit/0e826f0e6dbedc09db29aa62f3fd34db8fee328f.diff
LOG: Refactored, added MIR test.
Added:
llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-unmerge-ext.mir
Modified:
llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
index e9386d77b2559f..d2a99b65cc6aab 100644
--- a/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
+++ b/llvm/lib/Target/AArch64/GISel/AArch64PostLegalizerLowering.cpp
@@ -1071,18 +1071,18 @@ void applyVectorSextInReg(MachineInstr &MI, MachineRegisterInfo &MRI,
bool matchUnmergeExtToUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
Register &MatchInfo) {
assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
- if (MI.getNumDefs() != 2)
+ auto &Unmerge = cast<GUnmerge>(MI);
+ if (Unmerge.getNumDefs() != 2)
return false;
- if (!MRI.use_nodbg_empty(MI.getOperand(1).getReg()))
+ if (!MRI.use_nodbg_empty(Unmerge.getOperand(1).getReg()))
return false;
- LLT DstTy = MRI.getType(MI.getOperand(0).getReg());
+ LLT DstTy = MRI.getType(Unmerge.getOperand(0).getReg());
if (!DstTy.isVector())
return false;
- MachineInstr *Ext = getDefIgnoringCopies(
- MI.getOperand(MI.getNumExplicitDefs()).getReg(), MRI);
- if (!Ext || Ext->getOpcode() != AArch64::G_EXT)
+ MachineInstr *Ext = getOpcodeDef(AArch64::G_EXT, Unmerge.getSourceReg(), MRI);
+ if (!Ext)
return false;
Register ExtSrc1 = Ext->getOperand(1).getReg();
@@ -1092,13 +1092,11 @@ bool matchUnmergeExtToUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
if (!LowestVal || LowestVal->Value.getZExtValue() != DstTy.getSizeInBytes())
return false;
- MachineInstr *Undef = getDefIgnoringCopies(ExtSrc2, MRI);
- if (!Undef)
+ if (!getOpcodeDef<GImplicitDef>(ExtSrc2, MRI))
return false;
MatchInfo = ExtSrc1;
-
- return Undef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF;
+ return true;
}
void applyUnmergeExtToUnmerge(MachineInstr &MI, MachineRegisterInfo &MRI,
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-unmerge-ext.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-unmerge-ext.mir
new file mode 100644
index 00000000000000..2452c3083cc86e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizer-lowering-unmerge-ext.mir
@@ -0,0 +1,154 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-lowering -global-isel -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name: v4s32
+legalized: true
+body: |
+ bb.0.entry:
+ liveins: $q0
+ ; CHECK-LABEL: name: v4s32
+ ; CHECK: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<4 x s32>) = COPY $q0
+ ; CHECK-NEXT: %unused:_(<2 x s32>), %unmerge:_(<2 x s32>) = G_UNMERGE_VALUES %v1(<4 x s32>)
+ ; CHECK-NEXT: %fpext:_(<2 x s64>) = G_FPEXT %unmerge(<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY %fpext(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+ %v1:_(<4 x s32>) = COPY $q0
+ %implicit:_(<4 x s32>) = G_IMPLICIT_DEF
+ %C:_(s32) = G_CONSTANT i32 8
+ %ext:_(<4 x s32>) = G_EXT %v1:_, %implicit:_, %C:_(s32)
+ %unmerge:_(<2 x s32>), %unused:_(<2 x s32>) = G_UNMERGE_VALUES %ext:_(<4 x s32>)
+ %fpext:_(<2 x s64>) = G_FPEXT %unmerge:_(<2 x s32>)
+ $q0 = COPY %fpext
+ RET_ReallyLR implicit $q0
+...
+---
+name: v8s16
+legalized: true
+body: |
+ bb.0.entry:
+ liveins: $q0
+ ; CHECK-LABEL: name: v8s16
+ ; CHECK: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<8 x s16>) = COPY $q0
+ ; CHECK-NEXT: %unused:_(<4 x s16>), %unmerge:_(<4 x s16>) = G_UNMERGE_VALUES %v1(<8 x s16>)
+ ; CHECK-NEXT: %fpext:_(<4 x s32>) = G_FPEXT %unmerge(<4 x s16>)
+ ; CHECK-NEXT: $q0 = COPY %fpext(<4 x s32>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+ %v1:_(<8 x s16>) = COPY $q0
+ %implicit:_(<8 x s16>) = G_IMPLICIT_DEF
+ %C:_(s32) = G_CONSTANT i32 8
+ %ext:_(<8 x s16>) = G_EXT %v1:_, %implicit:_, %C:_(s32)
+ %unmerge:_(<4 x s16>), %unused:_(<4 x s16>) = G_UNMERGE_VALUES %ext:_(<8 x s16>)
+ %fpext:_(<4 x s32>) = G_FPEXT %unmerge:_(<4 x s16>)
+ $q0 = COPY %fpext
+ RET_ReallyLR implicit $q0
+...
+---
+name: v16s8
+legalized: true
+body: |
+ bb.0.entry:
+ liveins: $q0
+ ; CHECK-LABEL: name: v16s8
+ ; CHECK: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<16 x s8>) = COPY $q0
+ ; CHECK-NEXT: %unused:_(<8 x s8>), %unmerge:_(<8 x s8>) = G_UNMERGE_VALUES %v1(<16 x s8>)
+ ; CHECK-NEXT: %fpext:_(<8 x s16>) = G_FPEXT %unmerge(<8 x s8>)
+ ; CHECK-NEXT: $q0 = COPY %fpext(<8 x s16>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+ %v1:_(<16 x s8>) = COPY $q0
+ %implicit:_(<16 x s8>) = G_IMPLICIT_DEF
+ %C:_(s32) = G_CONSTANT i32 8
+ %ext:_(<16 x s8>) = G_EXT %v1:_, %implicit:_, %C:_(s32)
+ %unmerge:_(<8 x s8>), %unused:_(<8 x s8>) = G_UNMERGE_VALUES %ext:_(<16 x s8>)
+ %fpext:_(<8 x s16>) = G_FPEXT %unmerge:_(<8 x s8>)
+ $q0 = COPY %fpext
+ RET_ReallyLR implicit $q0
+...
+---
+name: skip_not_const
+legalized: true
+body: |
+ bb.0.entry:
+ liveins: $q0, $w0
+ ; CHECK-LABEL: name: skip_not_const
+ ; CHECK: liveins: $q0, $w0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<16 x s8>) = COPY $q0
+ ; CHECK-NEXT: %implicit:_(<16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: %C:_(s32) = COPY $w0
+ ; CHECK-NEXT: %ext:_(<16 x s8>) = G_EXT %v1, %implicit, %C(s32)
+ ; CHECK-NEXT: %unmerge:_(<8 x s8>), %unused:_(<8 x s8>) = G_UNMERGE_VALUES %ext(<16 x s8>)
+ ; CHECK-NEXT: %fpext:_(<8 x s16>) = G_FPEXT %unmerge(<8 x s8>)
+ ; CHECK-NEXT: $q0 = COPY %fpext(<8 x s16>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+ %v1:_(<16 x s8>) = COPY $q0
+ %implicit:_(<16 x s8>) = G_IMPLICIT_DEF
+ %C:_(s32) = COPY $w0
+ %ext:_(<16 x s8>) = G_EXT %v1:_, %implicit:_, %C:_(s32)
+ %unmerge:_(<8 x s8>), %unused:_(<8 x s8>) = G_UNMERGE_VALUES %ext:_(<16 x s8>)
+ %fpext:_(<8 x s16>) = G_FPEXT %unmerge:_(<8 x s8>)
+ $q0 = COPY %fpext
+ RET_ReallyLR implicit $q0
+...
+---
+name: skip_not_unused
+legalized: true
+body: |
+ bb.0.entry:
+ liveins: $q0
+ ; CHECK-LABEL: name: skip_not_unused
+ ; CHECK: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<16 x s8>) = COPY $q0
+ ; CHECK-NEXT: %implicit:_(<16 x s8>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: %C:_(s32) = G_CONSTANT i32 8
+ ; CHECK-NEXT: %ext:_(<16 x s8>) = G_EXT %v1, %implicit, %C(s32)
+ ; CHECK-NEXT: %unmerge:_(<8 x s8>), %unused:_(<8 x s8>) = G_UNMERGE_VALUES %ext(<16 x s8>)
+ ; CHECK-NEXT: %fpext:_(<8 x s16>) = G_FPEXT %unmerge(<8 x s8>)
+ ; CHECK-NEXT: %fpext2:_(<8 x s16>) = G_FPEXT %unused(<8 x s8>)
+ ; CHECK-NEXT: $q0 = COPY %fpext(<8 x s16>)
+ ; CHECK-NEXT: $q1 = COPY %fpext2(<8 x s16>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0, implicit $q1
+ %v1:_(<16 x s8>) = COPY $q0
+ %implicit:_(<16 x s8>) = G_IMPLICIT_DEF
+ %C:_(s32) = G_CONSTANT i32 8
+ %ext:_(<16 x s8>) = G_EXT %v1:_, %implicit:_, %C:_(s32)
+ %unmerge:_(<8 x s8>), %unused:_(<8 x s8>) = G_UNMERGE_VALUES %ext:_(<16 x s8>)
+ %fpext:_(<8 x s16>) = G_FPEXT %unmerge:_(<8 x s8>)
+ %fpext2:_(<8 x s16>) = G_FPEXT %unused:_(<8 x s8>)
+ $q0 = COPY %fpext
+ $q1 = COPY %fpext2
+ RET_ReallyLR implicit $q0, implicit $q1
+...
+---
+name: skip_borders
+legalized: true
+body: |
+ bb.0.entry:
+ liveins: $q0
+ ; CHECK-LABEL: name: skip_borders
+ ; CHECK: liveins: $q0
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: %v1:_(<4 x s32>) = COPY $q0
+ ; CHECK-NEXT: %implicit:_(<4 x s32>) = G_IMPLICIT_DEF
+ ; CHECK-NEXT: %C:_(s32) = G_CONSTANT i32 9
+ ; CHECK-NEXT: %ext:_(<4 x s32>) = G_EXT %v1, %implicit, %C(s32)
+ ; CHECK-NEXT: %unmerge:_(<2 x s32>), %unused:_(<2 x s32>) = G_UNMERGE_VALUES %ext(<4 x s32>)
+ ; CHECK-NEXT: %fpext:_(<2 x s64>) = G_FPEXT %unmerge(<2 x s32>)
+ ; CHECK-NEXT: $q0 = COPY %fpext(<2 x s64>)
+ ; CHECK-NEXT: RET_ReallyLR implicit $q0
+ %v1:_(<4 x s32>) = COPY $q0
+ %implicit:_(<4 x s32>) = G_IMPLICIT_DEF
+ %C:_(s32) = G_CONSTANT i32 9
+ %ext:_(<4 x s32>) = G_EXT %v1:_, %implicit:_, %C:_(s32)
+ %unmerge:_(<2 x s32>), %unused:_(<2 x s32>) = G_UNMERGE_VALUES %ext:_(<4 x s32>)
+ %fpext:_(<2 x s64>) = G_FPEXT %unmerge:_(<2 x s32>)
+ $q0 = COPY %fpext
+ RET_ReallyLR implicit $q0
+...
More information about the llvm-commits mailing list