[llvm] 0877fbc - GlobalISel: Add FoldBinOpIntoSelect combine

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 8 15:17:29 PST 2022


Author: Matt Arsenault
Date: 2022-02-08T18:17:21-05:00
New Revision: 0877fbcc16cdb4ea62e86fbe82d9c0eec9375952

URL: https://github.com/llvm/llvm-project/commit/0877fbcc16cdb4ea62e86fbe82d9c0eec9375952
DIFF: https://github.com/llvm/llvm-project/commit/0877fbcc16cdb4ea62e86fbe82d9c0eec9375952.diff

LOG: GlobalISel: Add FoldBinOpIntoSelect combine

This will do the combine in cases that should fold, but currently
don't. e.g. we're relying on the CSEMIRBuilder's incomplete constant
folding. For instance, it doesn't handle FP operations or vectors (and
we don't have separate constant folding combines either to catch
them).

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
    llvm/include/llvm/CodeGen/GlobalISel/Utils.h
    llvm/include/llvm/Target/GlobalISel/Combine.td
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
    llvm/lib/CodeGen/GlobalISel/Utils.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fold-binop-into-select.mir

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 198f120496949..7e754b8b2ffdf 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -539,6 +539,13 @@ class CombinerHelper {
   /// Combine G_UREM x, (known power of 2) to an add and bitmasking.
   void applySimplifyURemByPow2(MachineInstr &MI);
 
+  /// Push a binary operator through a select on constants.
+  ///
+  /// binop (select cond, K0, K1), K2 ->
+  ///   select cond, (binop K0, K2), (binop K1, K2)
+  bool matchFoldBinOpIntoSelect(MachineInstr &MI, unsigned &SelectOpNo);
+  bool applyFoldBinOpIntoSelect(MachineInstr &MI, const unsigned &SelectOpNo);
+
   bool matchCombineInsertVecElts(MachineInstr &MI,
                                  SmallVectorImpl<Register> &MatchInfo);
 

diff  --git a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
index aed915d2cc4b7..687e9d2da6477 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/Utils.h
@@ -408,6 +408,30 @@ bool isBuildVectorAllOnes(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI,
                           bool AllowUndef = false);
 
+/// Return true if the specified instruction is known to be a constant, or a
+/// vector of constants.
+///
+/// If \p AllowFP is true, this will consider G_FCONSTANT in addition to
+/// G_CONSTANT. If \p AllowOpaqueConstants is true, constant-like instructions
+/// such as G_GLOBAL_VALUE will also be considered.
+bool isConstantOrConstantVector(const MachineInstr &MI,
+                                const MachineRegisterInfo &MRI,
+                                bool AllowFP = true,
+                                bool AllowOpaqueConstants = true);
+
+/// Return true if the value is a constant 0 integer or a splatted vector of a
+/// constant 0 integer (with no undefs if \p AllowUndefs is false). This will
+/// handle G_BUILD_VECTOR and G_BUILD_VECTOR_TRUNC as truncation is not an issue
+/// for null values.
+bool isNullOrNullSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI,
+                       bool AllowUndefs = false);
+
+/// Return true if the value is a constant -1 integer or a splatted vector of a
+/// constant -1 integer (with no undefs if \p AllowUndefs is false).
+bool isAllOnesOrAllOnesSplat(const MachineInstr &MI,
+                             const MachineRegisterInfo &MRI,
+                             bool AllowUndefs = false);
+
 /// \returns a value when \p MI is a vector splat. The splat can be either a
 /// Register or a constant.
 ///

diff  --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index ed31b6f10a31a..e89c50e467935 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -118,6 +118,7 @@ def int64_matchinfo: GIDefMatchData<"int64_t">;
 def apint_matchinfo : GIDefMatchData<"APInt">;
 def build_fn_matchinfo :
 GIDefMatchData<"std::function<void(MachineIRBuilder &)>">;
+def unsigned_matchinfo: GIDefMatchData<"unsigned">;
 
 def copy_prop : GICombineRule<
   (defs root:$d),
@@ -323,6 +324,26 @@ def urem_pow2_to_mask : GICombineRule<
   (apply [{ Helper.applySimplifyURemByPow2(*${root}); }])
 >;
 
+// Push a binary operator through a select on constants.
+//
+// binop (select cond, K0, K1), K2 ->
+//   select cond, (binop K0, K2), (binop K1, K2)
+
+// Every binary operator that has constant folding. We currently do
+// not have constant folding for G_FPOW, G_FMAXNUM_IEEE or
+// G_FMINNUM_IEEE.
+def fold_binop_into_select : GICombineRule<
+  (defs root:$root, unsigned_matchinfo:$select_op_no),
+  (match (wip_match_opcode
+    G_ADD, G_SUB, G_PTR_ADD, G_AND, G_OR, G_XOR,
+    G_SDIV, G_SREM, G_UDIV, G_UREM, G_LSHR, G_ASHR, G_SHL,
+    G_SMIN, G_SMAX, G_UMIN, G_UMAX,
+    G_FMUL, G_FADD, G_FSUB, G_FDIV, G_FREM,
+    G_FMINNUM, G_FMAXNUM, G_FMINIMUM, G_FMAXIMUM):$root,
+    [{ return Helper.matchFoldBinOpIntoSelect(*${root}, ${select_op_no}); }]),
+  (apply [{ return Helper.applyFoldBinOpIntoSelect(*${root}, ${select_op_no}); }])
+>;
+
 // Transform d = [su]div(x, y) and r = [su]rem(x, y) - > d, r = [su]divrem(x, y)
 def div_rem_to_divrem_matchdata : GIDefMatchData<"MachineInstr *">;
 def div_rem_to_divrem : GICombineRule<
@@ -913,7 +934,7 @@ def all_combines : GICombineGroup<[trivial_combines, insert_vec_elt_combines,
     truncstore_merge, div_rem_to_divrem, funnel_shift_combines,
     form_bitfield_extract, constant_fold, fabs_fneg_fold,
     intdiv_combines, mulh_combines, redundant_neg_operands,
-    and_or_disjoint_mask, fma_combines]>;
+    and_or_disjoint_mask, fma_combines, fold_binop_into_select]>;
 
 // A combine group used for prelegalizer combiners at -O0. The combines in
 // this group have been selected based on experiments to balance code size and

diff  --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 8e72bdda481a3..83fde833f6473 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -3080,6 +3080,102 @@ void CombinerHelper::applySimplifyURemByPow2(MachineInstr &MI) {
   MI.eraseFromParent();
 }
 
+bool CombinerHelper::matchFoldBinOpIntoSelect(MachineInstr &MI,
+                                              unsigned &SelectOpNo) {
+  Register LHS = MI.getOperand(1).getReg();
+  Register RHS = MI.getOperand(2).getReg();
+
+  Register OtherOperandReg = RHS;
+  SelectOpNo = 1;
+  MachineInstr *Select = MRI.getVRegDef(LHS);
+
+  // Don't do this unless the old select is going away. We want to eliminate the
+  // binary operator, not replace a binop with a select.
+  if (Select->getOpcode() != TargetOpcode::G_SELECT ||
+      !MRI.hasOneNonDBGUse(LHS)) {
+    OtherOperandReg = LHS;
+    SelectOpNo = 2;
+    Select = MRI.getVRegDef(RHS);
+    if (Select->getOpcode() != TargetOpcode::G_SELECT ||
+        !MRI.hasOneNonDBGUse(RHS))
+      return false;
+  }
+
+  MachineInstr *SelectLHS = MRI.getVRegDef(Select->getOperand(2).getReg());
+  MachineInstr *SelectRHS = MRI.getVRegDef(Select->getOperand(3).getReg());
+
+  if (!isConstantOrConstantVector(*SelectLHS, MRI,
+                                  /*AllowFP*/ true,
+                                  /*AllowOpaqueConstants*/ false))
+    return false;
+  if (!isConstantOrConstantVector(*SelectRHS, MRI,
+                                  /*AllowFP*/ true,
+                                  /*AllowOpaqueConstants*/ false))
+    return false;
+
+  unsigned BinOpcode = MI.getOpcode();
+
+  // We now know one of the operands is a select of constants. Now verify that
+  // the other binary operator operand is either a constant, or we can handle a
+  // variable.
+  bool CanFoldNonConst =
+      (BinOpcode == TargetOpcode::G_AND || BinOpcode == TargetOpcode::G_OR) &&
+      (isNullOrNullSplat(*SelectLHS, MRI) ||
+       isAllOnesOrAllOnesSplat(*SelectLHS, MRI)) &&
+      (isNullOrNullSplat(*SelectRHS, MRI) ||
+       isAllOnesOrAllOnesSplat(*SelectRHS, MRI));
+  if (CanFoldNonConst)
+    return true;
+
+  return isConstantOrConstantVector(*MRI.getVRegDef(OtherOperandReg), MRI,
+                                    /*AllowFP*/ true,
+                                    /*AllowOpaqueConstants*/ false);
+}
+
+/// \p SelectOperand is the operand in binary operator \p MI that is the select
+/// to fold.
+bool CombinerHelper::applyFoldBinOpIntoSelect(MachineInstr &MI,
+                                              const unsigned &SelectOperand) {
+  Builder.setInstrAndDebugLoc(MI);
+
+  Register Dst = MI.getOperand(0).getReg();
+  Register LHS = MI.getOperand(1).getReg();
+  Register RHS = MI.getOperand(2).getReg();
+  MachineInstr *Select = MRI.getVRegDef(MI.getOperand(SelectOperand).getReg());
+
+  Register SelectCond = Select->getOperand(1).getReg();
+  Register SelectTrue = Select->getOperand(2).getReg();
+  Register SelectFalse = Select->getOperand(3).getReg();
+
+  LLT Ty = MRI.getType(Dst);
+  unsigned BinOpcode = MI.getOpcode();
+
+  Register FoldTrue, FoldFalse;
+
+  // We have a select-of-constants followed by a binary operator with a
+  // constant. Eliminate the binop by pulling the constant math into the select.
+  // Example: add (select Cond, CT, CF), CBO --> select Cond, CT + CBO, CF + CBO
+  if (SelectOperand == 1) {
+    // TODO: SelectionDAG verifies this actually constant folds before
+    // committing to the combine.
+
+    FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {SelectTrue, RHS}).getReg(0);
+    FoldFalse =
+        Builder.buildInstr(BinOpcode, {Ty}, {SelectFalse, RHS}).getReg(0);
+  } else {
+    FoldTrue = Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectTrue}).getReg(0);
+    FoldFalse =
+        Builder.buildInstr(BinOpcode, {Ty}, {LHS, SelectFalse}).getReg(0);
+  }
+
+  Builder.buildSelect(Dst, SelectCond, FoldTrue, FoldFalse, MI.getFlags());
+  Observer.erasingInstr(*Select);
+  Select->eraseFromParent();
+  MI.eraseFromParent();
+
+  return true;
+}
+
 Optional<SmallVector<Register, 8>>
 CombinerHelper::findCandidatesForLoadOrCombine(const MachineInstr *Root) const {
   assert(Root->getOpcode() == TargetOpcode::G_OR && "Expected G_OR only!");

diff  --git a/llvm/lib/CodeGen/GlobalISel/Utils.cpp b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
index 544af9a2954f5..6a8750478b48e 100644
--- a/llvm/lib/CodeGen/GlobalISel/Utils.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/Utils.cpp
@@ -1104,6 +1104,26 @@ Optional<RegOrConstant> llvm::getVectorSplat(const MachineInstr &MI,
   return RegOrConstant(Reg);
 }
 
+static bool isConstantScalar(const MachineInstr &MI,
+                             const MachineRegisterInfo &MRI,
+                             bool AllowFP = true,
+                             bool AllowOpaqueConstants = true) {
+  switch (MI.getOpcode()) {
+  case TargetOpcode::G_CONSTANT:
+  case TargetOpcode::G_IMPLICIT_DEF:
+    return true;
+  case TargetOpcode::G_FCONSTANT:
+    return AllowFP;
+  case TargetOpcode::G_GLOBAL_VALUE:
+  case TargetOpcode::G_FRAME_INDEX:
+  case TargetOpcode::G_BLOCK_ADDR:
+  case TargetOpcode::G_JUMP_TABLE:
+    return AllowOpaqueConstants;
+  default:
+    return false;
+  }
+}
+
 bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                       const MachineRegisterInfo &MRI) {
   Register Def = MI.getOperand(0).getReg();
@@ -1121,6 +1141,25 @@ bool llvm::isConstantOrConstantVector(MachineInstr &MI,
   return true;
 }
 
+bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
+                                      const MachineRegisterInfo &MRI,
+                                      bool AllowFP, bool AllowOpaqueConstants) {
+  if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
+    return true;
+
+  if (!isBuildVectorOp(MI.getOpcode()))
+    return false;
+
+  const unsigned NumOps = MI.getNumOperands();
+  for (unsigned I = 1; I != NumOps; ++I) {
+    const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
+    if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
+      return false;
+  }
+
+  return true;
+}
+
 Optional<APInt>
 llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
                                       const MachineRegisterInfo &MRI) {
@@ -1134,6 +1173,39 @@ llvm::isConstantOrConstantSplatVector(MachineInstr &MI,
   return APInt(ScalarSize, *MaybeCst, true);
 }
 
+bool llvm::isNullOrNullSplat(const MachineInstr &MI,
+                             const MachineRegisterInfo &MRI, bool AllowUndefs) {
+  switch (MI.getOpcode()) {
+  case TargetOpcode::G_IMPLICIT_DEF:
+    return AllowUndefs;
+  case TargetOpcode::G_CONSTANT:
+    return MI.getOperand(1).getCImm()->isNullValue();
+  case TargetOpcode::G_FCONSTANT: {
+    const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
+    return FPImm->isZero() && !FPImm->isNegative();
+  }
+  default:
+    if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
+      return false;
+    return isBuildVectorAllZeros(MI, MRI);
+  }
+}
+
+bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
+                                   const MachineRegisterInfo &MRI,
+                                   bool AllowUndefs) {
+  switch (MI.getOpcode()) {
+  case TargetOpcode::G_IMPLICIT_DEF:
+    return AllowUndefs;
+  case TargetOpcode::G_CONSTANT:
+    return MI.getOperand(1).getCImm()->isAllOnesValue();
+  default:
+    if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
+      return false;
+    return isBuildVectorAllOnes(MI, MRI);
+  }
+}
+
 bool llvm::matchUnaryPredicate(
     const MachineRegisterInfo &MRI, Register Reg,
     std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

diff  --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fold-binop-into-select.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fold-binop-into-select.mir
index f80a2c0101587..802e7eec17362 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fold-binop-into-select.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-fold-binop-into-select.mir
@@ -131,11 +131,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
-    ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %add:_(s32) = G_ADD %select, %thirty
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 50
+    ; CHECK-NEXT: %add:_(s32) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %add(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -160,11 +158,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
-    ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %add:_(s32) = G_ADD %thirty, %select
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 50
+    ; CHECK-NEXT: %add:_(s32) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %add(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -189,14 +185,11 @@ body: |
     ; CHECK-NEXT: %reg0:_(<2 x s32>) = COPY $vgpr0_vgpr1
     ; CHECK-NEXT: %reg1:_(<2 x s32>) = COPY $vgpr2_vgpr3
     ; CHECK-NEXT: %cond:_(<2 x s1>) = G_ICMP intpred(eq), %reg0(<2 x s32>), %reg1
-    ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
-    ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %ten_vec:_(<2 x s32>) = G_BUILD_VECTOR %ten(s32), %ten(s32)
-    ; CHECK-NEXT: %twenty_vec:_(<2 x s32>) = G_BUILD_VECTOR %twenty(s32), %twenty(s32)
-    ; CHECK-NEXT: %select:_(<2 x s32>) = G_SELECT %cond(<2 x s1>), %ten_vec, %twenty_vec
-    ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %thirty_vec:_(<2 x s32>) = G_BUILD_VECTOR %thirty(s32), %thirty(s32)
-    ; CHECK-NEXT: %add:_(<2 x s32>) = G_ADD %select, %thirty_vec
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 50
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), [[C1]](s32)
+    ; CHECK-NEXT: %add:_(<2 x s32>) = G_SELECT %cond(<2 x s1>), [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %add(<2 x s32>)
     %reg0:_(<2 x s32>) = COPY $vgpr0_vgpr1
     %reg1:_(<2 x s32>) = COPY $vgpr2_vgpr3
@@ -224,14 +217,13 @@ body: |
     ; CHECK-NEXT: %reg0:_(<2 x s32>) = COPY $vgpr0_vgpr1
     ; CHECK-NEXT: %reg1:_(<2 x s32>) = COPY $vgpr2_vgpr3
     ; CHECK-NEXT: %cond:_(<2 x s1>) = G_ICMP intpred(eq), %reg0(<2 x s32>), %reg1
-    ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
     ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %const_vec0:_(<2 x s32>) = G_BUILD_VECTOR %ten(s32), %twenty(s32)
-    ; CHECK-NEXT: %const_vec1:_(<2 x s32>) = G_BUILD_VECTOR %twenty(s32), %ten(s32)
-    ; CHECK-NEXT: %select:_(<2 x s32>) = G_SELECT %cond(<2 x s1>), %const_vec0, %const_vec1
     ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %const_vec3:_(<2 x s32>) = G_BUILD_VECTOR %thirty(s32), %ten(s32)
-    ; CHECK-NEXT: %add:_(<2 x s32>) = G_ADD %select, %const_vec3
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; CHECK-NEXT: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), %thirty(s32)
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 50
+    ; CHECK-NEXT: [[BUILD_VECTOR1:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[C1]](s32), %twenty(s32)
+    ; CHECK-NEXT: %add:_(<2 x s32>) = G_SELECT %cond(<2 x s1>), [[BUILD_VECTOR]], [[BUILD_VECTOR1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %add(<2 x s32>)
     %reg0:_(<2 x s32>) = COPY $vgpr0_vgpr1
     %reg1:_(<2 x s32>) = COPY $vgpr2_vgpr3
@@ -263,10 +255,11 @@ body: |
     ; CHECK-NEXT: %undef:_(s32) = G_IMPLICIT_DEF
     ; CHECK-NEXT: %const_vec0:_(<2 x s32>) = G_BUILD_VECTOR %undef(s32), %twenty(s32)
     ; CHECK-NEXT: %const_vec1:_(<2 x s32>) = G_BUILD_VECTOR %twenty(s32), %undef(s32)
-    ; CHECK-NEXT: %select:_(<2 x s32>) = G_SELECT %cond(<2 x s1>), %const_vec0, %const_vec1
     ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
     ; CHECK-NEXT: %const_vec3:_(<2 x s32>) = G_BUILD_VECTOR %thirty(s32), %undef(s32)
-    ; CHECK-NEXT: %add:_(<2 x s32>) = G_ADD %select, %const_vec3
+    ; CHECK-NEXT: [[ADD:%[0-9]+]]:_(<2 x s32>) = G_ADD %const_vec0, %const_vec3
+    ; CHECK-NEXT: [[ADD1:%[0-9]+]]:_(<2 x s32>) = G_ADD %const_vec1, %const_vec3
+    ; CHECK-NEXT: %add:_(<2 x s32>) = G_SELECT %cond(<2 x s1>), [[ADD]], [[ADD1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %add(<2 x s32>)
     %reg0:_(<2 x s32>) = COPY $vgpr0_vgpr1
     %reg1:_(<2 x s32>) = COPY $vgpr2_vgpr3
@@ -295,11 +288,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
-    ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %sub:_(s32) = G_SUB %select, %thirty
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -20
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -10
+    ; CHECK-NEXT: %sub:_(s32) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %sub(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -326,9 +317,7 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
     ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %sub:_(s32) = G_SUB %thirty, %select
+    ; CHECK-NEXT: %sub:_(s32) = G_SELECT %cond(s1), %twenty, %ten
     ; CHECK-NEXT: S_ENDPGM 0, implicit %sub(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -355,9 +344,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %ten:_(p3) = G_CONSTANT i32 10
     ; CHECK-NEXT: %twenty:_(p3) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(p3) = G_SELECT %cond(s1), %ten, %twenty
     ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %ptr_add:_(p3) = G_PTR_ADD %select, %thirty(s32)
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD %ten, %thirty(s32)
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD %twenty, %thirty(s32)
+    ; CHECK-NEXT: %ptr_add:_(p3) = G_SELECT %cond(s1), [[PTR_ADD]], [[PTR_ADD1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %ptr_add(p3)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -384,9 +374,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
     ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
     ; CHECK-NEXT: %thirty:_(p3) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %ptr_add:_(p3) = G_PTR_ADD %thirty, %select(s32)
+    ; CHECK-NEXT: [[PTR_ADD:%[0-9]+]]:_(p3) = G_PTR_ADD %thirty, %ten(s32)
+    ; CHECK-NEXT: [[PTR_ADD1:%[0-9]+]]:_(p3) = G_PTR_ADD %thirty, %twenty(s32)
+    ; CHECK-NEXT: %ptr_add:_(p3) = G_SELECT %cond(s1), [[PTR_ADD]], [[PTR_ADD1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %ptr_add(p3)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -411,11 +402,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %ten:_(s64) = G_CONSTANT i64 10
-    ; CHECK-NEXT: %twenty:_(s64) = G_CONSTANT i64 20
-    ; CHECK-NEXT: %select:_(s64) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %seven:_(s32) = G_CONSTANT i32 7
-    ; CHECK-NEXT: %shl:_(s64) = G_SHL %select, %seven(s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1280
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 2560
+    ; CHECK-NEXT: %shl:_(s64) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %shl(s64)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -440,11 +429,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
-    ; CHECK-NEXT: %sixteen:_(s32) = G_CONSTANT i32 16
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %sixteen
-    ; CHECK-NEXT: %eight:_(s64) = G_CONSTANT i64 8
-    ; CHECK-NEXT: %shl:_(s64) = G_SHL %eight, %select(s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 8192
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 524288
+    ; CHECK-NEXT: %shl:_(s64) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %shl(s64)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -470,9 +457,7 @@ body: |
     ; CHECK-NEXT: %variable:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %neg1:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %zero, %neg1
-    ; CHECK-NEXT: %and:_(s32) = G_AND %select, %variable
+    ; CHECK-NEXT: %and:_(s32) = G_SELECT %cond(s1), %zero, %variable
     ; CHECK-NEXT: S_ENDPGM 0, implicit %and(s32)
     %reg:_(s32) = COPY $vgpr0
     %variable:_(s32) = COPY $vgpr0
@@ -497,9 +482,7 @@ body: |
     ; CHECK-NEXT: %variable:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %neg1:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %neg1, %zero
-    ; CHECK-NEXT: %and:_(s32) = G_AND %select, %variable
+    ; CHECK-NEXT: %and:_(s32) = G_SELECT %cond(s1), %variable, %zero
     ; CHECK-NEXT: S_ENDPGM 0, implicit %and(s32)
     %reg:_(s32) = COPY $vgpr0
     %variable:_(s32) = COPY $vgpr0
@@ -525,8 +508,7 @@ body: |
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %neg1:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %zero, %neg1
-    ; CHECK-NEXT: %or:_(s32) = G_OR %select, %variable
+    ; CHECK-NEXT: %or:_(s32) = G_SELECT %cond(s1), %variable, %neg1
     ; CHECK-NEXT: S_ENDPGM 0, implicit %or(s32)
     %reg:_(s32) = COPY $vgpr0
     %variable:_(s32) = COPY $vgpr0
@@ -552,8 +534,7 @@ body: |
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %neg1:_(s32) = G_CONSTANT i32 -1
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %neg1, %zero
-    ; CHECK-NEXT: %or:_(s32) = G_OR %select, %variable
+    ; CHECK-NEXT: %or:_(s32) = G_SELECT %cond(s1), %neg1, %variable
     ; CHECK-NEXT: S_ENDPGM 0, implicit %or(s32)
     %reg:_(s32) = COPY $vgpr0
     %variable:_(s32) = COPY $vgpr0
@@ -636,8 +617,8 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %neg1:_(s32) = G_CONSTANT i32 -1
     ; CHECK-NEXT: %fpzero:_(s32) = G_FCONSTANT float 0.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %neg1, %fpzero
-    ; CHECK-NEXT: %or:_(s32) = G_OR %select, %variable
+    ; CHECK-NEXT: [[OR:%[0-9]+]]:_(s32) = G_OR %fpzero, %variable
+    ; CHECK-NEXT: %or:_(s32) = G_SELECT %cond(s1), %neg1, [[OR]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %or(s32)
     %reg:_(s32) = COPY $vgpr0
     %variable:_(s32) = COPY $vgpr0
@@ -722,9 +703,7 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
     ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %xor:_(s32) = G_XOR %select, %thirty
+    ; CHECK-NEXT: %xor:_(s32) = G_SELECT %cond(s1), %twenty, %ten
     ; CHECK-NEXT: S_ENDPGM 0, implicit %xor(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -749,11 +728,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %hundred:_(s32) = G_CONSTANT i32 100
-    ; CHECK-NEXT: %fortytwo:_(s32) = G_CONSTANT i32 42
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %hundred, %fortytwo
-    ; CHECK-NEXT: %two:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: %sdiv:_(s32) = G_SDIV %select, %two
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 50
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 21
+    ; CHECK-NEXT: %sdiv:_(s32) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %sdiv(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -778,11 +755,8 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %hundred:_(s32) = G_CONSTANT i32 100
-    ; CHECK-NEXT: %fortytwo:_(s32) = G_CONSTANT i32 42
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %hundred, %fortytwo
-    ; CHECK-NEXT: %three:_(s32) = G_CONSTANT i32 3
-    ; CHECK-NEXT: %srem:_(s32) = G_SREM %select, %three
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %srem:_(s32) = G_SELECT %cond(s1), [[C]], %zero
     ; CHECK-NEXT: S_ENDPGM 0, implicit %srem(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -807,12 +781,10 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %hundred:_(s32) = G_CONSTANT i32 100
-    ; CHECK-NEXT: %fortytwo:_(s32) = G_CONSTANT i32 42
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %hundred, %fortytwo
-    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
-    ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR %select, [[C]](s32)
-    ; CHECK-NEXT: S_ENDPGM 0, implicit [[LSHR]](s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 50
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 21
+    ; CHECK-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT %cond(s1), [[C]], [[C1]]
+    ; CHECK-NEXT: S_ENDPGM 0, implicit [[SELECT]](s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
     %cond:_(s1) = G_ICMP intpred(eq), %reg, %zero
@@ -836,11 +808,8 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %hundred:_(s32) = G_CONSTANT i32 100
-    ; CHECK-NEXT: %fortytwo:_(s32) = G_CONSTANT i32 42
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %hundred, %fortytwo
-    ; CHECK-NEXT: %three:_(s32) = G_CONSTANT i32 3
-    ; CHECK-NEXT: %udiv:_(s32) = G_UREM %select, %three
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
+    ; CHECK-NEXT: %udiv:_(s32) = G_SELECT %cond(s1), [[C]], %zero
     ; CHECK-NEXT: S_ENDPGM 0, implicit %udiv(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -865,11 +834,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %ten:_(s64) = G_CONSTANT i64 10
-    ; CHECK-NEXT: %twenty:_(s64) = G_CONSTANT i64 20
-    ; CHECK-NEXT: %select:_(s64) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %two:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: %lshr:_(s64) = G_LSHR %select, %two(s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
+    ; CHECK-NEXT: %lshr:_(s64) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %lshr(s64)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -894,11 +861,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %three:_(s32) = G_CONSTANT i32 3
-    ; CHECK-NEXT: %five:_(s32) = G_CONSTANT i32 5
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %three, %five
-    ; CHECK-NEXT: %val:_(s64) = G_CONSTANT i64 8096
-    ; CHECK-NEXT: %lshr:_(s64) = G_LSHR %val, %select(s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 1012
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 253
+    ; CHECK-NEXT: %lshr:_(s64) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %lshr(s64)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -923,11 +888,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %ten:_(s64) = G_CONSTANT i64 10
-    ; CHECK-NEXT: %twenty:_(s64) = G_CONSTANT i64 20
-    ; CHECK-NEXT: %select:_(s64) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %two:_(s32) = G_CONSTANT i32 2
-    ; CHECK-NEXT: %ashr:_(s64) = G_ASHR %select, %two(s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 2
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 5
+    ; CHECK-NEXT: %ashr:_(s64) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %ashr(s64)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -952,11 +915,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %three:_(s32) = G_CONSTANT i32 3
-    ; CHECK-NEXT: %five:_(s32) = G_CONSTANT i32 5
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %three, %five
-    ; CHECK-NEXT: %val:_(s64) = G_CONSTANT i64 -8096
-    ; CHECK-NEXT: %ashr:_(s64) = G_ASHR %val, %select(s32)
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1012
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 -253
+    ; CHECK-NEXT: %ashr:_(s64) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %ashr(s64)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -983,9 +944,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
     ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
     ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %smin:_(s32) = G_SMIN %select, %thirty
+    ; CHECK-NEXT: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN %ten, %thirty
+    ; CHECK-NEXT: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN %twenty, %thirty
+    ; CHECK-NEXT: %smin:_(s32) = G_SELECT %cond(s1), [[SMIN]], [[SMIN1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %smin(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1012,9 +974,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
     ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
     ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %smax:_(s32) = G_SMAX %select, %thirty
+    ; CHECK-NEXT: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX %ten, %thirty
+    ; CHECK-NEXT: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX %twenty, %thirty
+    ; CHECK-NEXT: %smax:_(s32) = G_SELECT %cond(s1), [[SMAX]], [[SMAX1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %smax(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1041,9 +1004,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
     ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
     ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %umin:_(s32) = G_UMIN %select, %thirty
+    ; CHECK-NEXT: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN %ten, %thirty
+    ; CHECK-NEXT: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN %twenty, %thirty
+    ; CHECK-NEXT: %umin:_(s32) = G_SELECT %cond(s1), [[UMIN]], [[UMIN1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %umin(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1070,9 +1034,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
     ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
     ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %umax:_(s32) = G_UMAX %select, %thirty
+    ; CHECK-NEXT: [[UMAX:%[0-9]+]]:_(s32) = G_UMAX %ten, %thirty
+    ; CHECK-NEXT: [[UMAX1:%[0-9]+]]:_(s32) = G_UMAX %twenty, %thirty
+    ; CHECK-NEXT: %umax:_(s32) = G_SELECT %cond(s1), [[UMAX]], [[UMAX1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %umax(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1099,9 +1064,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %sixteen:_(s32) = G_FCONSTANT float 1.600000e+01
-    ; CHECK-NEXT: %fadd:_(s32) = nnan G_FADD %select, %sixteen
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD %two, %sixteen
+    ; CHECK-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD %four, %sixteen
+    ; CHECK-NEXT: %fadd:_(s32) = nnan G_SELECT %cond(s1), [[FADD]], [[FADD1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %fadd(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1128,9 +1094,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %sixteen:_(s32) = G_FCONSTANT float 1.600000e+01
-    ; CHECK-NEXT: %fadd:_(s32) = nnan G_FADD %sixteen, %select
+    ; CHECK-NEXT: [[FADD:%[0-9]+]]:_(s32) = G_FADD %sixteen, %two
+    ; CHECK-NEXT: [[FADD1:%[0-9]+]]:_(s32) = G_FADD %sixteen, %four
+    ; CHECK-NEXT: %fadd:_(s32) = nnan G_SELECT %cond(s1), [[FADD]], [[FADD1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %fadd(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1157,9 +1124,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %sixteen:_(s32) = G_FCONSTANT float 1.600000e+01
-    ; CHECK-NEXT: %fsub:_(s32) = nnan G_FSUB %select, %sixteen
+    ; CHECK-NEXT: [[FSUB:%[0-9]+]]:_(s32) = G_FSUB %two, %sixteen
+    ; CHECK-NEXT: [[FSUB1:%[0-9]+]]:_(s32) = G_FSUB %four, %sixteen
+    ; CHECK-NEXT: %fsub:_(s32) = nnan G_SELECT %cond(s1), [[FSUB]], [[FSUB1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %fsub(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1186,9 +1154,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %sixteen:_(s32) = G_FCONSTANT float 1.600000e+01
-    ; CHECK-NEXT: %fmul:_(s32) = nnan G_FMUL %select, %sixteen
+    ; CHECK-NEXT: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL %two, %sixteen
+    ; CHECK-NEXT: [[FMUL1:%[0-9]+]]:_(s32) = G_FMUL %four, %sixteen
+    ; CHECK-NEXT: %fmul:_(s32) = nnan G_SELECT %cond(s1), [[FMUL]], [[FMUL1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %fmul(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1215,9 +1184,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %sixteen:_(s32) = G_FCONSTANT float 1.600000e+01
-    ; CHECK-NEXT: %fdiv:_(s32) = nnan G_FDIV %select, %sixteen
+    ; CHECK-NEXT: [[FDIV:%[0-9]+]]:_(s32) = G_FDIV %two, %sixteen
+    ; CHECK-NEXT: [[FDIV1:%[0-9]+]]:_(s32) = G_FDIV %four, %sixteen
+    ; CHECK-NEXT: %fdiv:_(s32) = nnan G_SELECT %cond(s1), [[FDIV]], [[FDIV1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %fdiv(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1244,9 +1214,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %sixteen:_(s32) = G_FCONSTANT float 1.600000e+01
-    ; CHECK-NEXT: %frem:_(s32) = nnan G_FREM %select, %sixteen
+    ; CHECK-NEXT: [[FREM:%[0-9]+]]:_(s32) = G_FREM %two, %sixteen
+    ; CHECK-NEXT: [[FREM1:%[0-9]+]]:_(s32) = G_FREM %four, %sixteen
+    ; CHECK-NEXT: %frem:_(s32) = nnan G_SELECT %cond(s1), [[FREM]], [[FREM1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %frem(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1302,9 +1273,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %sixteen:_(s32) = G_FCONSTANT float 1.600000e+01
-    ; CHECK-NEXT: %fminnum:_(s32) = nnan G_FMINNUM %select, %sixteen
+    ; CHECK-NEXT: [[FMINNUM:%[0-9]+]]:_(s32) = G_FMINNUM %two, %sixteen
+    ; CHECK-NEXT: [[FMINNUM1:%[0-9]+]]:_(s32) = G_FMINNUM %four, %sixteen
+    ; CHECK-NEXT: %fminnum:_(s32) = nnan G_SELECT %cond(s1), [[FMINNUM]], [[FMINNUM1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %fminnum(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1360,9 +1332,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %three:_(s32) = G_FCONSTANT float 3.000000e+00
-    ; CHECK-NEXT: %fmaxnum:_(s32) = nnan G_FMAXNUM %select, %three
+    ; CHECK-NEXT: [[FMAXNUM:%[0-9]+]]:_(s32) = G_FMAXNUM %two, %three
+    ; CHECK-NEXT: [[FMAXNUM1:%[0-9]+]]:_(s32) = G_FMAXNUM %four, %three
+    ; CHECK-NEXT: %fmaxnum:_(s32) = nnan G_SELECT %cond(s1), [[FMAXNUM]], [[FMAXNUM1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %fmaxnum(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1418,9 +1391,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %sixteen:_(s32) = G_FCONSTANT float 1.600000e+01
-    ; CHECK-NEXT: %fminimum:_(s32) = nnan G_FMINIMUM %select, %sixteen
+    ; CHECK-NEXT: [[FMINIMUM:%[0-9]+]]:_(s32) = G_FMINIMUM %two, %sixteen
+    ; CHECK-NEXT: [[FMINIMUM1:%[0-9]+]]:_(s32) = G_FMINIMUM %four, %sixteen
+    ; CHECK-NEXT: %fminimum:_(s32) = nnan G_SELECT %cond(s1), [[FMINIMUM]], [[FMINIMUM1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %fminimum(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1447,9 +1421,10 @@ body: |
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
     ; CHECK-NEXT: %two:_(s32) = G_FCONSTANT float 2.000000e+00
     ; CHECK-NEXT: %four:_(s32) = G_FCONSTANT float 4.000000e+00
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %two, %four
     ; CHECK-NEXT: %three:_(s32) = G_FCONSTANT float 3.000000e+00
-    ; CHECK-NEXT: %fmaximum:_(s32) = nnan G_FMAXIMUM %select, %three
+    ; CHECK-NEXT: [[FMAXIMUM:%[0-9]+]]:_(s32) = G_FMAXIMUM %two, %three
+    ; CHECK-NEXT: [[FMAXIMUM1:%[0-9]+]]:_(s32) = G_FMAXIMUM %four, %three
+    ; CHECK-NEXT: %fmaximum:_(s32) = nnan G_SELECT %cond(s1), [[FMAXIMUM]], [[FMAXIMUM1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %fmaximum(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1556,18 +1531,15 @@ tracksRegLiveness: true
 body: |
   bb.0:
     liveins: $vgpr0
-
     ; CHECK-LABEL: name: fold_add_copy_into_select_s32_0
     ; CHECK: liveins: $vgpr0
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
-    ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %add:_(s32) = G_ADD %select, %thirty
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 50
+    ; CHECK-NEXT: %add:_(s32) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %add(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0
@@ -1593,11 +1565,9 @@ body: |
     ; CHECK-NEXT: %reg:_(s32) = COPY $vgpr0
     ; CHECK-NEXT: %zero:_(s32) = G_CONSTANT i32 0
     ; CHECK-NEXT: %cond:_(s1) = G_ICMP intpred(eq), %reg(s32), %zero
-    ; CHECK-NEXT: %ten:_(s32) = G_CONSTANT i32 10
-    ; CHECK-NEXT: %twenty:_(s32) = G_CONSTANT i32 20
-    ; CHECK-NEXT: %select:_(s32) = G_SELECT %cond(s1), %ten, %twenty
-    ; CHECK-NEXT: %thirty:_(s32) = G_CONSTANT i32 30
-    ; CHECK-NEXT: %add:_(s32) = G_ADD %thirty, %select
+    ; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 40
+    ; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 50
+    ; CHECK-NEXT: %add:_(s32) = G_SELECT %cond(s1), [[C]], [[C1]]
     ; CHECK-NEXT: S_ENDPGM 0, implicit %add(s32)
     %reg:_(s32) = COPY $vgpr0
     %zero:_(s32) = G_CONSTANT i32 0


        


More information about the llvm-commits mailing list