[llvm] bebe6a6 - [GlobalISel] Combine (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
Jessica Paquette via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 11 10:40:18 PDT 2020
Author: Jessica Paquette
Date: 2020-08-11T10:40:06-07:00
New Revision: bebe6a6449811e877f7eba3f1798ddd1fa83e440
URL: https://github.com/llvm/llvm-project/commit/bebe6a6449811e877f7eba3f1798ddd1fa83e440
DIFF: https://github.com/llvm/llvm-project/commit/bebe6a6449811e877f7eba3f1798ddd1fa83e440.diff
LOG: [GlobalISel] Combine (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
This implements
```
(logic_op (op x...), (op y...)) -> (op (logic_op x, y))
```
when `op` is an extend, a shift, or an and.
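Concretely, the two shapes handled here (mirroring the comments in the patch) are:
```
logic (ext X), (ext Y)           --> ext (logic X, Y)
logic (binop X, Z), (binop Y, Z) --> binop (logic X, Y), Z
```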
This is similar to `DAGCombiner::hoistLogicOpWithSameOpcodeHands`
(though a number of cases are still missing, e.g. G_TRUNC, G_BITCAST, etc.).
This is implemented so that it works both pre- and post-legalization.
This also adds a general way to build a series of instructions in a combine
(`applyBuildInstructionSteps`).
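As a rough sketch of how this is meant to be used (the real user is `matchHoistLogicOpWithSameOpcodeHands` in the diff below; the function and register names here are hypothetical), a match function records one `InstructionBuildSteps` per instruction it wants built, with each operand expressed as a callback:
```
// Minimal sketch, not part of the patch: record a single G_OR to be built.
#include "llvm/CodeGen/GlobalISel/CombinerHelper.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/TargetOpcode.h"
using namespace llvm;

static bool matchMyCombine(MachineInstr &MI,
                           InstructionStepsMatchInfo &MatchInfo) {
  Register Dst = MI.getOperand(0).getReg();
  Register LHS = MI.getOperand(1).getReg();
  Register RHS = MI.getOperand(2).getReg();
  // Each callback appends one operand when the instruction is built.
  OperandBuildSteps Ops = {
      [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(LHS); },
      [=](MachineInstrBuilder &MIB) { MIB.addReg(RHS); }};
  MatchInfo = InstructionStepsMatchInfo(
      {InstructionBuildSteps(TargetOpcode::G_OR, Ops)});
  return true;
}
```
`applyBuildInstructionSteps` then builds each recorded instruction with `Builder.buildInstr(Opcode)`, runs its operand callbacks in order, and erases the original instruction.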
Differential Revision: https://reviews.llvm.org/D85050
Added:
llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-hoist-same-hands.mir
llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-hoist-same-hands.mir
Modified:
llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
llvm/include/llvm/Target/GlobalISel/Combine.td
llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
llvm/lib/Target/AArch64/AArch64Combine.td
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 3847112d5183..e632f5fd05ec 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -25,12 +25,14 @@ namespace llvm {
class GISelChangeObserver;
class MachineIRBuilder;
+class MachineInstrBuilder;
class MachineRegisterInfo;
class MachineInstr;
class MachineOperand;
class GISelKnownBits;
class MachineDominatorTree;
class LegalizerInfo;
+struct LegalityQuery;
struct PreferredTuple {
LLT Ty; // The result type of the extend.
@@ -50,6 +52,25 @@ struct PtrAddChain {
Register Base;
};
+using OperandBuildSteps =
+ SmallVector<std::function<void(MachineInstrBuilder &)>, 4>;
+struct InstructionBuildSteps {
+ unsigned Opcode = 0; /// The opcode for the produced instruction.
+ OperandBuildSteps OperandFns; /// Operands to be added to the instruction.
+ InstructionBuildSteps() = default;
+ InstructionBuildSteps(unsigned Opcode, const OperandBuildSteps &OperandFns)
+ : Opcode(Opcode), OperandFns(OperandFns) {}
+};
+
+struct InstructionStepsMatchInfo {
+ /// Describes instructions to be built during a combine.
+ SmallVector<InstructionBuildSteps, 2> InstrsToBuild;
+ InstructionStepsMatchInfo() = default;
+ InstructionStepsMatchInfo(
+ std::initializer_list<InstructionBuildSteps> InstrsToBuild)
+ : InstrsToBuild(InstrsToBuild) {}
+};
+
class CombinerHelper {
protected:
MachineIRBuilder &Builder;
@@ -69,6 +90,10 @@ class CombinerHelper {
return KB;
}
+ /// \return true if the combine is running prior to legalization, or if \p
+ /// Query is legal on the target.
+ bool isLegalOrBeforeLegalizer(const LegalityQuery &Query) const;
+
/// MachineRegisterInfo::replaceRegWith() and inform the observer of the changes
void replaceRegWith(MachineRegisterInfo &MRI, Register FromReg, Register ToReg) const;
@@ -260,6 +285,15 @@ class CombinerHelper {
bool applySimplifyAddToSub(MachineInstr &MI,
std::tuple<Register, Register> &MatchInfo);
+ /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
+ bool
+ matchHoistLogicOpWithSameOpcodeHands(MachineInstr &MI,
+ InstructionStepsMatchInfo &MatchInfo);
+
+ /// Replace \p MI with a series of instructions described in \p MatchInfo.
+ bool applyBuildInstructionSteps(MachineInstr &MI,
+ InstructionStepsMatchInfo &MatchInfo);
+
/// Try to transform \p MI by using all of the above
/// combine functions. Returns true if changed.
bool tryCombine(MachineInstr &MI);
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index ba19a4635570..9cb45e2bfc11 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -85,6 +85,7 @@ class GIDefMatchData<string type> : GIDefKind {
def extending_load_matchdata : GIDefMatchData<"PreferredTuple">;
def indexed_load_store_matchdata : GIDefMatchData<"IndexedLoadStoreMatchInfo">;
+def instruction_steps_matchdata: GIDefMatchData<"InstructionStepsMatchInfo">;
/// The operator at the root of a GICombineRule.Match dag.
def match;
@@ -275,6 +276,14 @@ def i2p_to_p2i: GICombineRule<
(apply [{ return Helper.applyCombineP2IToI2P(*${root}, ${info}); }])
>;
+// Simplify: (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
+def hoist_logic_op_with_same_opcode_hands: GICombineRule <
+ (defs root:$root, instruction_steps_matchdata:$info),
+ (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
+ [{ return Helper.matchHoistLogicOpWithSameOpcodeHands(*${root}, ${info}); }]),
+ (apply [{ return Helper.applyBuildInstructionSteps(*${root}, ${info});}])
+>;
+
// FIXME: These should use the custom predicate feature once it lands.
def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
undef_to_negative_one,
@@ -285,10 +294,11 @@ def undef_combines : GICombineGroup<[undef_to_fp_zero, undef_to_int_zero,
def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
binop_same_val, binop_left_to_zero,
- binop_right_to_zero, p2i_to_i2p,
+ binop_right_to_zero, p2i_to_i2p,
i2p_to_p2i]>;
def trivial_combines : GICombineGroup<[copy_prop, mul_to_shl]>;
def all_combines : GICombineGroup<[trivial_combines, ptr_add_immed_chain,
combines_for_extload, combine_indexed_load_store, undef_combines,
- identity_combines, simplify_add_to_sub]>;
+ identity_combines, simplify_add_to_sub,
+ hoist_logic_op_with_same_opcode_hands]>;
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index a9ec1d4365a3..b922f6988a2c 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -34,7 +34,6 @@ static cl::opt<bool>
cl::desc("Force all indexed operations to be "
"legal for the GlobalISel combiner"));
-
CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
MachineIRBuilder &B, GISelKnownBits *KB,
MachineDominatorTree *MDT,
@@ -44,6 +43,11 @@ CombinerHelper::CombinerHelper(GISelChangeObserver &Observer,
(void)this->KB;
}
+bool CombinerHelper::isLegalOrBeforeLegalizer(
+ const LegalityQuery &Query) const {
+ return !LI || LI->getAction(Query).Action == LegalizeActions::Legal;
+}
+
void CombinerHelper::replaceRegWith(MachineRegisterInfo &MRI, Register FromReg,
Register ToReg) const {
Observer.changingAllUsesOfReg(MRI, FromReg);
@@ -1776,6 +1780,113 @@ bool CombinerHelper::applySimplifyAddToSub(
return true;
}
+bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
+ MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
+ // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
+ //
+ // Creates the new hand + logic instruction (but does not insert them.)
+ //
+ // On success, MatchInfo is populated with the new instructions. These are
+ // inserted in applyHoistLogicOpWithSameOpcodeHands.
+ unsigned LogicOpcode = MI.getOpcode();
+ assert(LogicOpcode == TargetOpcode::G_AND ||
+ LogicOpcode == TargetOpcode::G_OR ||
+ LogicOpcode == TargetOpcode::G_XOR);
+ MachineIRBuilder MIB(MI);
+ Register Dst = MI.getOperand(0).getReg();
+ Register LHSReg = MI.getOperand(1).getReg();
+ Register RHSReg = MI.getOperand(2).getReg();
+
+ // Don't recompute anything.
+ if (!MRI.hasOneNonDBGUse(LHSReg) || !MRI.hasOneNonDBGUse(RHSReg))
+ return false;
+
+ // Make sure we have (hand x, ...), (hand y, ...)
+ MachineInstr *LeftHandInst = getDefIgnoringCopies(LHSReg, MRI);
+ MachineInstr *RightHandInst = getDefIgnoringCopies(RHSReg, MRI);
+ if (!LeftHandInst || !RightHandInst)
+ return false;
+ unsigned HandOpcode = LeftHandInst->getOpcode();
+ if (HandOpcode != RightHandInst->getOpcode())
+ return false;
+ if (!LeftHandInst->getOperand(1).isReg() ||
+ !RightHandInst->getOperand(1).isReg())
+ return false;
+
+ // Make sure the types match up, and if we're doing this post-legalization,
+ // we end up with legal types.
+ Register X = LeftHandInst->getOperand(1).getReg();
+ Register Y = RightHandInst->getOperand(1).getReg();
+ LLT XTy = MRI.getType(X);
+ LLT YTy = MRI.getType(Y);
+ if (XTy != YTy)
+ return false;
+ if (!isLegalOrBeforeLegalizer({LogicOpcode, {XTy, YTy}}))
+ return false;
+
+ // Optional extra source register.
+ Register ExtraHandOpSrcReg;
+ switch (HandOpcode) {
+ default:
+ return false;
+ case TargetOpcode::G_ANYEXT:
+ case TargetOpcode::G_SEXT:
+ case TargetOpcode::G_ZEXT: {
+ // Match: logic (ext X), (ext Y) --> ext (logic X, Y)
+ break;
+ }
+ case TargetOpcode::G_AND:
+ case TargetOpcode::G_ASHR:
+ case TargetOpcode::G_LSHR:
+ case TargetOpcode::G_SHL: {
+ // Match: logic (binop x, z), (binop y, z) -> binop (logic x, y), z
+ MachineOperand &ZOp = LeftHandInst->getOperand(2);
+ if (!matchEqualDefs(ZOp, RightHandInst->getOperand(2)))
+ return false;
+ ExtraHandOpSrcReg = ZOp.getReg();
+ break;
+ }
+ }
+
+ // Record the steps to build the new instructions.
+ //
+ // Steps to build (logic x, y)
+ auto NewLogicDst = MRI.createGenericVirtualRegister(XTy);
+ OperandBuildSteps LogicBuildSteps = {
+ [=](MachineInstrBuilder &MIB) { MIB.addDef(NewLogicDst); },
+ [=](MachineInstrBuilder &MIB) { MIB.addReg(X); },
+ [=](MachineInstrBuilder &MIB) { MIB.addReg(Y); }};
+ InstructionBuildSteps LogicSteps(LogicOpcode, LogicBuildSteps);
+
+ // Steps to build hand (logic x, y), ...z
+ OperandBuildSteps HandBuildSteps = {
+ [=](MachineInstrBuilder &MIB) { MIB.addDef(Dst); },
+ [=](MachineInstrBuilder &MIB) { MIB.addReg(NewLogicDst); }};
+ if (ExtraHandOpSrcReg.isValid())
+ HandBuildSteps.push_back(
+ [=](MachineInstrBuilder &MIB) { MIB.addReg(ExtraHandOpSrcReg); });
+ InstructionBuildSteps HandSteps(HandOpcode, HandBuildSteps);
+
+ MatchInfo = InstructionStepsMatchInfo({LogicSteps, HandSteps});
+ return true;
+}
+
+bool CombinerHelper::applyBuildInstructionSteps(
+ MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) {
+ assert(MatchInfo.InstrsToBuild.size() &&
+ "Expected at least one instr to build?");
+ Builder.setInstr(MI);
+ for (auto &InstrToBuild : MatchInfo.InstrsToBuild) {
+ assert(InstrToBuild.Opcode && "Expected a valid opcode?");
+ assert(InstrToBuild.OperandFns.size() && "Expected at least one operand?");
+ MachineInstrBuilder Instr = Builder.buildInstr(InstrToBuild.Opcode);
+ for (auto &OperandFn : InstrToBuild.OperandFns)
+ OperandFn(Instr);
+ }
+ MI.eraseFromParent();
+ return true;
+}
+
bool CombinerHelper::tryCombine(MachineInstr &MI) {
if (tryCombineCopy(MI))
return true;
diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index b8da73de0f8c..acbb616f9d2d 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -79,6 +79,7 @@ def shuffle_vector_pseudos : GICombineGroup<[dup, rev, ext, zip, uzp, trn]>;
def AArch64PostLegalizerCombinerHelper
: GICombinerHelper<"AArch64GenPostLegalizerCombinerHelper",
[erase_undef_store, combines_for_extload,
- sext_trunc_sextload, shuffle_vector_pseudos]> {
+ sext_trunc_sextload, shuffle_vector_pseudos,
+ hoist_logic_op_with_same_opcode_hands]> {
let DisableRuleOption = "aarch64postlegalizercombiner-disable-rule";
}
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-hoist-same-hands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-hoist-same-hands.mir
new file mode 100644
index 000000000000..b05bd923e34d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/postlegalizercombiner-hoist-same-hands.mir
@@ -0,0 +1,60 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-postlegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+
+...
+---
+name: or_combine_sext
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; or (sext X), (sext Y) --> sext (or X, Y)
+ ;
+ ; CHECK-LABEL: name: or_combine_sext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_SEXT [[OR]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_SEXT %x(s32)
+ %hand2:_(s64) = G_SEXT %y(s32)
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: illegal_ty
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; Post-legalization, we should not perform the optimization here, since
+ ; it would create an illegal G_OR.
+ ;
+ ; CHECK-LABEL: name: illegal_ty
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x_wide:_(s32) = COPY $w0
+ ; CHECK: %y_wide:_(s32) = COPY $w1
+ ; CHECK: %x:_(s1) = G_TRUNC %x_wide(s32)
+ ; CHECK: %y:_(s1) = G_TRUNC %y_wide(s32)
+ ; CHECK: %hand1:_(s64) = G_SEXT %x(s1)
+ ; CHECK: %hand2:_(s64) = G_SEXT %y(s1)
+ ; CHECK: %logic_op:_(s64) = G_OR %hand1, %hand2
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x_wide:_(s32) = COPY $w0
+ %y_wide:_(s32) = COPY $w1
+ %x:_(s1) = G_TRUNC %x_wide
+ %y:_(s1) = G_TRUNC %y_wide
+ %hand1:_(s64) = G_SEXT %x(s1)
+ %hand2:_(s64) = G_SEXT %y(s1)
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-hoist-same-hands.mir b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-hoist-same-hands.mir
new file mode 100644
index 000000000000..48fc042d7c73
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-hoist-same-hands.mir
@@ -0,0 +1,692 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple aarch64 -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+
+...
+---
+name: or_combine_sext
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; or (sext X), (sext Y) --> sext (or X, Y)
+ ;
+ ; CHECK-LABEL: name: or_combine_sext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_SEXT [[OR]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_SEXT %x(s32)
+ %hand2:_(s64) = G_SEXT %y(s32)
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: or_combine_zext
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; or (zext X), (zext Y) --> zext (or X, Y)
+ ;
+ ; CHECK-LABEL: name: or_combine_zext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_ZEXT [[OR]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_ZEXT %x(s32)
+ %hand2:_(s64) = G_ZEXT %y(s32)
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: or_combine_anyext
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; or (anyext X), (anyext Y) --> anyext (or X, Y)
+ ;
+ ; CHECK-LABEL: name: or_combine_anyext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_ANYEXT [[OR]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_ANYEXT %x(s32)
+ %hand2:_(s64) = G_ANYEXT %y(s32)
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: and_combine_sext
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; and (sext X), (sext Y) --> sext (and X, Y)
+ ;
+ ; CHECK-LABEL: name: and_combine_sext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND %x, %y
+ ; CHECK: %logic_op:_(s64) = G_SEXT [[AND]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_SEXT %x(s32)
+ %hand2:_(s64) = G_SEXT %y(s32)
+ %logic_op:_(s64) = G_AND %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: and_combine_zext
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; and (zext X), (zext Y) --> zext (and X, Y)
+ ;
+ ; CHECK-LABEL: name: and_combine_zext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND %x, %y
+ ; CHECK: %logic_op:_(s64) = G_ZEXT [[AND]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_ZEXT %x(s32)
+ %hand2:_(s64) = G_ZEXT %y(s32)
+ %logic_op:_(s64) = G_AND %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: and_combine_anyext
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; and (anyext X), (anyext Y) --> anyext (and X, Y)
+ ;
+ ; CHECK-LABEL: name: and_combine_anyext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND %x, %y
+ ; CHECK: %logic_op:_(s64) = G_ANYEXT [[AND]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_ANYEXT %x(s32)
+ %hand2:_(s64) = G_ANYEXT %y(s32)
+ %logic_op:_(s64) = G_AND %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: xor_combine_sext
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; xor (sext X), (sext Y) --> sext (xor X, Y)
+ ;
+ ; CHECK-LABEL: name: xor_combine_sext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_SEXT [[XOR]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_SEXT %x(s32)
+ %hand2:_(s64) = G_SEXT %y(s32)
+ %logic_op:_(s64) = G_XOR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: xor_combine_zext
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; xor (zext X), (zext Y) --> zext (xor X, Y)
+ ;
+ ; CHECK-LABEL: name: xor_combine_zext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_ZEXT [[XOR]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_ZEXT %x(s32)
+ %hand2:_(s64) = G_ZEXT %y(s32)
+ %logic_op:_(s64) = G_XOR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: xor_combine_anyext
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; xor (anyext X), (anyext Y) --> anyext (xor X, Y)
+ ;
+ ; CHECK-LABEL: name: xor_combine_anyext
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_ANYEXT [[XOR]](s32)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_ANYEXT %x(s32)
+ %hand2:_(s64) = G_ANYEXT %y(s32)
+ %logic_op:_(s64) = G_XOR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: dont_combine_different_types
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; When %x and %y don't have the same type, don't perform the combine.
+ ;
+ ; CHECK-LABEL: name: dont_combine_different_types
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %hand1:_(s32) = COPY $w1
+ ; CHECK: %y:_(s16) = G_TRUNC %hand1(s32)
+ ; CHECK: %hand2:_(s64) = G_SEXT %x(s32)
+ ; CHECK: %logic_op:_(s64) = G_SEXT %y(s16)
+ ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR %hand2, %logic_op
+ ; CHECK: $x0 = COPY [[OR]](s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %hand1:_(s32) = COPY $w1
+ %y:_(s16) = G_TRUNC %hand1(s32)
+ %hand2:_(s64) = G_SEXT %x(s32)
+ %logic_op:_(s64) = G_SEXT %y(s16)
+ %5:_(s64) = G_OR %hand2, %logic_op
+ $x0 = COPY %5(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: illegal_ty
+tracksRegLiveness: true
+legalized: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; Pre-legalization, it's okay to produce illegal types.
+ ;
+ ; CHECK-LABEL: name: illegal_ty
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x_wide:_(s32) = COPY $w0
+ ; CHECK: %y_wide:_(s32) = COPY $w1
+ ; CHECK: %x:_(s1) = G_TRUNC %x_wide(s32)
+ ; CHECK: %y:_(s1) = G_TRUNC %y_wide(s32)
+ ; CHECK: [[OR:%[0-9]+]]:_(s1) = G_OR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_SEXT [[OR]](s1)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x_wide:_(s32) = COPY $w0
+ %y_wide:_(s32) = COPY $w1
+ %x:_(s1) = G_TRUNC %x_wide
+ %y:_(s1) = G_TRUNC %y_wide
+ %hand1:_(s64) = G_SEXT %x(s1)
+ %hand2:_(s64) = G_SEXT %y(s1)
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: or_combine_and
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; or (and x, z), (and y, z) --> and (or x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: or_combine_and
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_AND [[OR]], %z
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_AND %x(s64), %z
+ %hand2:_(s64) = G_AND %y(s64), %z
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: or_combine_ashr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; or (ashr x, z), (ashr y, z) --> ashr (or x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: or_combine_ashr
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_ASHR [[OR]], %z(s64)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_ASHR %x(s64), %z
+ %hand2:_(s64) = G_ASHR %y(s64), %z
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: or_combine_lshr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; or (lshr x, z), (lshr y, z) --> lshr (or x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: or_combine_lshr
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_LSHR [[OR]], %z(s64)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_LSHR %x(s64), %z
+ %hand2:_(s64) = G_LSHR %y(s64), %z
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: or_combine_shl
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; or (shl x, z), (shl y, z) --> shl (or x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: or_combine_shl
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_SHL [[OR]], %z(s64)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_SHL %x(s64), %z
+ %hand2:_(s64) = G_SHL %y(s64), %z
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: xor_combine_and
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; xor (and x, z), (and y, z) --> and (xor x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: xor_combine_and
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_AND [[XOR]], %z
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_AND %x(s64), %z
+ %hand2:_(s64) = G_AND %y(s64), %z
+ %logic_op:_(s64) = G_XOR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: xor_combine_ashr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; xor (ashr x, z), (ashr y, z) --> ashr (xor x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: xor_combine_ashr
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_ASHR [[XOR]], %z(s64)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_ASHR %x(s64), %z
+ %hand2:_(s64) = G_ASHR %y(s64), %z
+ %logic_op:_(s64) = G_XOR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: xor_combine_lshr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; xor (lshr x, z), (lshr y, z) --> lshr (xor x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: xor_combine_lshr
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_LSHR [[XOR]], %z(s64)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_LSHR %x(s64), %z
+ %hand2:_(s64) = G_LSHR %y(s64), %z
+ %logic_op:_(s64) = G_XOR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: xor_combine_shl
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; xor (shl x, z), (shl y, z) --> shl (xor x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: xor_combine_shl
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[XOR:%[0-9]+]]:_(s64) = G_XOR %x, %y
+ ; CHECK: %logic_op:_(s64) = G_SHL [[XOR]], %z(s64)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_SHL %x(s64), %z
+ %hand2:_(s64) = G_SHL %y(s64), %z
+ %logic_op:_(s64) = G_XOR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: and_combine_and
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; and (and x, z), (and y, z) --> and (and x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: and_combine_and
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %x, %y
+ ; CHECK: %logic_op:_(s64) = G_AND [[AND]], %z
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_AND %x(s64), %z
+ %hand2:_(s64) = G_AND %y(s64), %z
+ %logic_op:_(s64) = G_AND %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: and_combine_ashr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; and (ashr x, z), (ashr y, z) --> ashr (and x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: and_combine_ashr
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %x, %y
+ ; CHECK: %logic_op:_(s64) = G_ASHR [[AND]], %z(s64)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_ASHR %x(s64), %z
+ %hand2:_(s64) = G_ASHR %y(s64), %z
+ %logic_op:_(s64) = G_AND %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: and_combine_lshr
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; and (lshr x, z), (lshr y, z) --> lshr (and x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: and_combine_lshr
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %x, %y
+ ; CHECK: %logic_op:_(s64) = G_LSHR [[AND]], %z(s64)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_LSHR %x(s64), %z
+ %hand2:_(s64) = G_LSHR %y(s64), %z
+ %logic_op:_(s64) = G_AND %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: and_combine_shl
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; and (shl x, z), (shl y, z) --> shl (and x, y), z
+ ;
+ liveins: $x0, $x1, $x2
+ ; CHECK-LABEL: name: and_combine_shl
+ ; CHECK: liveins: $x0, $x1, $x2
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z:_(s64) = COPY $x2
+ ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND %x, %y
+ ; CHECK: %logic_op:_(s64) = G_SHL [[AND]], %z(s64)
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z:_(s64) = COPY $x2
+ %hand1:_(s64) = G_SHL %x(s64), %z
+ %hand2:_(s64) = G_SHL %y(s64), %z
+ %logic_op:_(s64) = G_AND %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: dont_combine_different_defs_on_binop
+tracksRegLiveness: true
+body: |
+ bb.0:
+ ; z1 != z2, so don't combine.
+ ;
+ liveins: $x0, $x1, $x2, $x3
+ ; CHECK-LABEL: name: dont_combine_different_defs_on_binop
+ ; CHECK: liveins: $x0, $x1, $x2, $x3
+ ; CHECK: %x:_(s64) = COPY $x0
+ ; CHECK: %y:_(s64) = COPY $x1
+ ; CHECK: %z1:_(s64) = COPY $x2
+ ; CHECK: %z2:_(s64) = COPY $x3
+ ; CHECK: %hand1:_(s64) = G_AND %x, %z1
+ ; CHECK: %hand2:_(s64) = G_AND %y, %z2
+ ; CHECK: %logic_op:_(s64) = G_OR %hand1, %hand2
+ ; CHECK: $x0 = COPY %logic_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s64) = COPY $x0
+ %y:_(s64) = COPY $x1
+ %z1:_(s64) = COPY $x2
+ %z2:_(s64) = COPY $x3
+ %hand1:_(s64) = G_AND %x(s64), %z1
+ %hand2:_(s64) = G_AND %y(s64), %z2
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ $x0 = COPY %logic_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: dont_combine_more_than_one_use
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $w0, $w1
+ ; The LHS register is used more than once. Don't combine.
+ ;
+ ; CHECK-LABEL: name: dont_combine_more_than_one_use
+ ; CHECK: liveins: $w0, $w1
+ ; CHECK: %x:_(s32) = COPY $w0
+ ; CHECK: %y:_(s32) = COPY $w1
+ ; CHECK: %hand1:_(s64) = G_SEXT %x(s32)
+ ; CHECK: %hand2:_(s64) = G_SEXT %y(s32)
+ ; CHECK: %logic_op:_(s64) = G_OR %hand1, %hand2
+ ; CHECK: %other_op:_(s64) = G_ADD %hand1, %logic_op
+ ; CHECK: $x0 = COPY %other_op(s64)
+ ; CHECK: RET_ReallyLR implicit $x0
+ %x:_(s32) = COPY $w0
+ %y:_(s32) = COPY $w1
+ %hand1:_(s64) = G_SEXT %x(s32)
+ %hand2:_(s64) = G_SEXT %y(s32)
+ %logic_op:_(s64) = G_OR %hand1, %hand2
+ %other_op:_(s64) = G_ADD %hand1, %logic_op
+ $x0 = COPY %other_op(s64)
+ RET_ReallyLR implicit $x0
+
+...
+---
+name: combine_vector
+tracksRegLiveness: true
+body: |
+ bb.0:
+ liveins: $d0, $d1
+ ; Show that we can combine vector types.
+ ;
+ ; CHECK-LABEL: name: combine_vector
+ ; CHECK: liveins: $d0, $d1
+ ; CHECK: %x:_(<2 x s32>) = COPY $d0
+ ; CHECK: %y:_(<2 x s32>) = COPY $d1
+ ; CHECK: [[OR:%[0-9]+]]:_(<2 x s32>) = G_OR %x, %y
+ ; CHECK: %logic_op:_(<2 x s64>) = G_SEXT [[OR]](<2 x s32>)
+ ; CHECK: $q0 = COPY %logic_op(<2 x s64>)
+ ; CHECK: RET_ReallyLR implicit $q0
+ %x:_(<2 x s32>) = COPY $d0
+ %y:_(<2 x s32>) = COPY $d1
+ %hand1:_(<2 x s64>) = G_SEXT %x(<2 x s32>)
+ %hand2:_(<2 x s64>) = G_SEXT %y(<2 x s32>)
+ %logic_op:_(<2 x s64>) = G_OR %hand1, %hand2
+ $q0 = COPY %logic_op(<2 x s64>)
+ RET_ReallyLR implicit $q0