[llvm] r365743 - [MIPS GlobalISel] RegBankSelect for chains of ambiguous instructions

Petar Avramovic via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 11 02:22:49 PDT 2019


Author: petar.avramovic
Date: Thu Jul 11 02:22:49 2019
New Revision: 365743

URL: http://llvm.org/viewvc/llvm-project?rev=365743&view=rev
Log:
[MIPS GlobalISel] RegBankSelect for chains of ambiguous instructions

When one of the uses/defs of an ambiguous instruction is also ambiguous,
visit it recursively and search its uses/defs for an instruction with
only one mapping available.
When all instructions in a chain are ambiguous, an arbitrary mapping can
be selected. For s64 operands in an ambiguous chain, fprb is selected
since it results in fewer instructions than having to narrow scalar s64
to s32. For s32, both gprb and fprb result in the same number of
instructions, and gprb is selected as the general purpose option.

At the moment we always avoid cross register bank copies.
TODO: Implement a cost model for different mappings of the same
instruction and for cross bank copies, and allow cross bank copies
when appropriate according to that cost model.

Differential Revision: https://reviews.llvm.org/D64485
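
For readers who want the shape of the algorithm without reading the diff,
here is a minimal, self-contained C++ sketch of the recursive visit plus
waiting-queue idea described above. It is an illustration, not the patch
itself: Node and Kind are hypothetical stand-ins for MachineInstr and
InstType, and the sketch ignores details such as use/def direction and
physical registers.

// Illustrative sketch only: toy types, not LLVM's RegBankSelect code.
#include <map>
#include <vector>

enum class Kind { NotDetermined, Integer, FloatingPoint, Ambiguous };

struct Node {
  bool IsAmbiguous = true;              // e.g. loads/stores/phis/selects
  Kind FixedKind = Kind::NotDetermined; // mapping of a non-ambiguous node
  std::vector<Node *> Adjacent;         // uses/defs sharing ambiguous operands
};

class TypeInfo {
  std::map<Node *, Kind> Types;
  std::map<Node *, std::vector<Node *>> WaitingQueues;

  // Record a decision for N and propagate it to everything parked on N.
  void setKind(Node *N, Kind K) {
    Types[N] = K;
    for (Node *W : WaitingQueues[N])
      setKind(W, K);
    WaitingQueues[N].clear();
  }

  // Depth-first search from N; WaitingFor is the node whose search reached N
  // (nullptr at the root). Returns true once N's kind has been decided.
  bool visit(Node *N, Node *WaitingFor) {
    if (Types.count(N))
      return true; // already visited
    Types[N] = Kind::NotDetermined;

    for (Node *Adj : N->Adjacent) {
      if (!Adj->IsAmbiguous) {          // found a node with a unique mapping
        setKind(N, Adj->FixedKind);
        return true;
      }
      if (Types.count(Adj) && Types[Adj] == Kind::NotDetermined)
        continue;                       // Adj is still being visited; skip it
      if (visit(Adj, N)) {              // returns at once if Adj is decided
        setKind(N, Types[Adj]);
        return true;
      }
    }

    if (!WaitingFor) {
      // The whole chain is ambiguous; the caller then picks a per-size
      // default (fprb for s64, gprb for s32 in this patch).
      setKind(N, Kind::Ambiguous);
      return true;
    }
    // Park N until WaitingFor decides; setKind will then propagate to N.
    WaitingQueues[WaitingFor].push_back(N);
    return false;
  }

public:
  Kind determine(Node *N) {
    visit(N, nullptr);
    return Types[N];
  }
};

The key point mirrored from the patch is that a branch which turns out to
be entirely ambiguous is parked on its search parent's waiting queue, and
setKind later propagates the parent's final decision to everything that
was waiting on it.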

Added:
    llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll
    llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll
    llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll
    llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
    llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
    llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir
Modified:
    llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.cpp
    llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.h
    llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll
    llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll
    llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll
    llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
    llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir
    llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir

Modified: llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.cpp?rev=365743&r1=365742&r2=365743&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.cpp (original)
+++ llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.cpp Thu Jul 11 02:22:49 2019
@@ -210,8 +210,11 @@ MipsRegisterBankInfo::AmbiguousRegDefUse
   }
 }
 
-bool MipsRegisterBankInfo::TypeInfoForMF::visit(const MachineInstr *MI) {
+bool MipsRegisterBankInfo::TypeInfoForMF::visit(
+    const MachineInstr *MI, const MachineInstr *WaitingForTypeOfMI) {
   assert(isAmbiguous(MI->getOpcode()) && "Visiting non-Ambiguous opcode.\n");
+  if (wasVisited(MI))
+    return true; // InstType has already been determined for MI.
 
   startVisit(MI);
   AmbiguousRegDefUseContainer DefUseContainer(MI);
@@ -224,6 +227,21 @@ bool MipsRegisterBankInfo::TypeInfoForMF
   if (visitAdjacentInstrs(MI, DefUseContainer.getUseDefs(), false))
     return true;
 
+  // All of MI's adjacent instructions are ambiguous.
+  if (!WaitingForTypeOfMI) {
+    // This is a chain of ambiguous instructions.
+    setTypes(MI, InstType::Ambiguous);
+    return true;
+  }
+  // Excluding WaitingForTypeOfMI, MI is either connected to chains of ambiguous
+  // instructions or has no other adjacent instructions. Either way, InstType
+  // cannot be determined here. There could still be an unexplored path from one
+  // of WaitingForTypeOfMI's adjacent instructions to an instruction with only
+  // one mapping available.
+  // We are done with this branch, so add MI to WaitingForTypeOfMI's WaitingQueue;
+  // this way, when WaitingForTypeOfMI figures out its InstType, the same
+  // InstType will be assigned to all instructions in this branch.
+  addToWaitingQueue(WaitingForTypeOfMI, MI);
   return false;
 }
 
@@ -246,15 +264,23 @@ bool MipsRegisterBankInfo::TypeInfoForMF
       return true;
     }
 
-    if (isAmbiguous(AdjMI->getOpcode())) {
-      // Chains of ambiguous instructions are not supported.
-      return false;
-    }
-
     // Defaults to integer instruction. Includes G_MERGE_VALUES and
     // G_UNMERGE_VALUES.
-    setTypes(MI, InstType::Integer);
-    return true;
+    if (!isAmbiguous(AdjMI->getOpcode())) {
+      setTypes(MI, InstType::Integer);
+      return true;
+    }
+
+    // When AdjMI was visited first, MI has to continue exploring its remaining
+    // adjacent instructions and determine InstType without visiting AdjMI.
+    if (!wasVisited(AdjMI) ||
+        getRecordedTypeForInstr(AdjMI) != InstType::NotDetermined) {
+      if (visit(AdjMI, MI)) {
+        // InstType is successfully determined and is the same as for AdjMI.
+        setTypes(MI, getRecordedTypeForInstr(AdjMI));
+        return true;
+      }
+    }
   }
   return false;
 }
@@ -262,6 +288,9 @@ bool MipsRegisterBankInfo::TypeInfoForMF
 void MipsRegisterBankInfo::TypeInfoForMF::setTypes(const MachineInstr *MI,
                                                    InstType InstTy) {
   changeRecordedTypeForInstr(MI, InstTy);
+  for (const MachineInstr *WaitingInstr : getWaitingQueueFor(MI)) {
+    setTypes(WaitingInstr, InstTy);
+  }
 }
 
 void MipsRegisterBankInfo::TypeInfoForMF::setTypesAccordingToPhysicalRegister(
@@ -288,7 +317,7 @@ void MipsRegisterBankInfo::TypeInfoForMF
 
 MipsRegisterBankInfo::InstType
 MipsRegisterBankInfo::TypeInfoForMF::determineInstType(const MachineInstr *MI) {
-  visit(MI);
+  visit(MI, nullptr);
   return getRecordedTypeForInstr(MI);
 }
 
@@ -296,6 +325,7 @@ void MipsRegisterBankInfo::TypeInfoForMF
     llvm::StringRef FunctionName) {
   if (MFName != FunctionName) {
     MFName = FunctionName;
+    WaitingQueues.clear();
     Types.clear();
   }
 }
@@ -354,7 +384,8 @@ MipsRegisterBankInfo::getInstrMapping(co
       InstTy = TI.determineInstType(&MI);
     }
 
-    if (InstTy == InstType::FloatingPoint) { // fprb
+    if (InstTy == InstType::FloatingPoint ||
+        (Size == 64 && InstTy == InstType::Ambiguous)) { // fprb
       OperandsMapping =
           getOperandsMapping({Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
                                          : &Mips::ValueMappings[Mips::DPRIdx],
@@ -378,7 +409,8 @@ MipsRegisterBankInfo::getInstrMapping(co
       InstTy = TI.determineInstType(&MI);
     }
 
-    if (InstTy == InstType::FloatingPoint) { // fprb
+    if (InstTy == InstType::FloatingPoint ||
+        (Size == 64 && InstTy == InstType::Ambiguous)) { // fprb
       OperandsMapping =
           getOperandsMapping({Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
                                          : &Mips::ValueMappings[Mips::DPRIdx],
@@ -421,7 +453,8 @@ MipsRegisterBankInfo::getInstrMapping(co
       InstTy = TI.determineInstType(&MI);
     }
 
-    if (InstTy == InstType::FloatingPoint) { // fprb
+    if (InstTy == InstType::FloatingPoint ||
+        (Size == 64 && InstTy == InstType::Ambiguous)) { // fprb
       const RegisterBankInfo::ValueMapping *Bank =
           Size == 32 ? &Mips::ValueMappings[Mips::SPRIdx]
                      : &Mips::ValueMappings[Mips::DPRIdx];

Modified: llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.h?rev=365743&r1=365742&r2=365743&view=diff
==============================================================================
--- llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.h (original)
+++ llvm/trunk/lib/Target/Mips/MipsRegisterBankInfo.h Thu Jul 11 02:22:49 2019
@@ -45,13 +45,19 @@ private:
   /// We assign InstType to such instructions as it helps us to avoid cross bank
   /// copies. InstType depends on context.
   enum InstType {
+    /// Temporary type; once visit(..., nullptr) finishes it will be converted
+    /// to one of the remaining types: Integer, FloatingPoint or Ambiguous.
     NotDetermined,
     /// Connected with instruction that interprets 'bags of bits' as integers.
     /// Select gprb to avoid cross bank copies.
     Integer,
     /// Connected with instruction that interprets 'bags of bits' as floating
     /// point numbers. Select fprb to avoid cross bank copies.
-    FloatingPoint
+    FloatingPoint,
+    /// Represents moving 'bags of bits' around. Select the same bank for the
+    /// entire chain to avoid cross bank copies. Currently we select fprb for
+    /// s64 and gprb for s32 Ambiguous operands.
+    Ambiguous
   };
 
   /// Some generic instructions have operands that can be mapped to either fprb
@@ -76,16 +82,23 @@ private:
   class TypeInfoForMF {
     /// MachineFunction name is used to recognise when MF changes.
     std::string MFName = "";
+    /// <key, value>: value is a vector of all MachineInstrs that are waiting
+    /// for key to figure out the type of some of its ambiguous operands.
+    DenseMap<const MachineInstr *, SmallVector<const MachineInstr *, 2>>
+        WaitingQueues;
     /// Recorded InstTypes for visited instructions.
     DenseMap<const MachineInstr *, InstType> Types;
 
-    bool visit(const MachineInstr *MI);
+    /// Recursively visit MI's adjacent instructions and find MI's InstType.
+    bool visit(const MachineInstr *MI, const MachineInstr *WaitingForTypeOfMI);
 
     /// Visit MI's adjacent UseDefs or DefUses.
     bool visitAdjacentInstrs(const MachineInstr *MI,
                              SmallVectorImpl<MachineInstr *> &AdjacentInstrs,
                              bool isDefUse);
 
+    /// Set type for MI, and recursively for all instructions that are
+    /// waiting for MI's type.
     void setTypes(const MachineInstr *MI, InstType ITy);
 
     /// InstType for MI is determined, set it to InstType that corresponds to
@@ -97,8 +110,11 @@ private:
     /// Set default values for MI in order to start visit.
     void startVisit(const MachineInstr *MI) {
       Types.try_emplace(MI, InstType::NotDetermined);
+      WaitingQueues.try_emplace(MI);
     }
 
+    /// Returns true if the instruction was already visited. Its type might not
+    /// be determined yet, but it will be when visit(..., nullptr) finishes.
     bool wasVisited(const MachineInstr *MI) const { return Types.count(MI); };
 
     /// Returns recorded type for instruction.
@@ -113,6 +129,20 @@ private:
       Types.find(MI)->getSecond() = InstTy;
     };
 
+    /// Returns WaitingQueue for instruction.
+    const SmallVectorImpl<const MachineInstr *> &
+    getWaitingQueueFor(const MachineInstr *MI) const {
+      assert(WaitingQueues.count(MI) && "Instruction was not visited!");
+      return WaitingQueues.find(MI)->getSecond();
+    };
+
+    /// Add WaitingForMI to MI's WaitingQueue.
+    void addToWaitingQueue(const MachineInstr *MI,
+                           const MachineInstr *WaitingForMI) {
+      assert(WaitingQueues.count(MI) && "Instruction was not visited!");
+      WaitingQueues.find(MI)->getSecond().push_back(WaitingForMI);
+    };
+
   public:
     InstType determineInstType(const MachineInstr *MI);
 

Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll?rev=365743&r1=365742&r2=365743&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/load.ll Thu Jul 11 02:22:49 2019
@@ -26,6 +26,19 @@ entry:
   ret i64 %0
 }
 
+define void @load_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b) {
+; MIPS32-LABEL: load_ambiguous_i64_in_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    ldc1 $f0, 0($4)
+; MIPS32-NEXT:    sdc1 $f0, 0($5)
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load i64, i64* %i64_ptr_a
+  store i64 %0, i64* %i64_ptr_b
+  ret void
+}
+
 define float @load_float(float* %ptr) {
 ; MIPS32-LABEL: load_float:
 ; MIPS32:       # %bb.0: # %entry
@@ -37,6 +50,19 @@ entry:
   ret float %0
 }
 
+define void @load_ambiguous_float_in_gpr(float* %float_ptr_a, float* %float_ptr_b) {
+; MIPS32-LABEL: load_ambiguous_float_in_gpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lw $1, 0($4)
+; MIPS32-NEXT:    sw $1, 0($5)
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load float, float* %float_ptr_a
+  store float %0, float* %float_ptr_b
+  ret void
+}
+
 define double @load_double(double* %ptr) {
 ; MIPS32-LABEL: load_double:
 ; MIPS32:       # %bb.0: # %entry

Added: llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll?rev=365743&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s32.ll Thu Jul 11 02:22:49 2019
@@ -0,0 +1,742 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
+
+define void @long_chain_ambiguous_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i32* %a, i32* %b, i32* %c, i32* %result) {
+; MIPS32-LABEL: long_chain_ambiguous_i32_in_gpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -48
+; MIPS32-NEXT:    .cfi_def_cfa_offset 48
+; MIPS32-NEXT:    addiu $1, $sp, 64
+; MIPS32-NEXT:    lw $1, 0($1)
+; MIPS32-NEXT:    addiu $2, $sp, 68
+; MIPS32-NEXT:    lw $2, 0($2)
+; MIPS32-NEXT:    addiu $3, $sp, 72
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    ori $8, $zero, 1
+; MIPS32-NEXT:    and $8, $4, $8
+; MIPS32-NEXT:    sw $1, 44($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 40($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 36($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 32($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $8, $BB0_9
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %pre.PHI.1
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB0_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.2: # %pre.PHI.1.0
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 32($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB0_5
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.3: # %b.PHI.1.0
+; MIPS32-NEXT:    lw $1, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB0_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_4: # %b.PHI.1.1
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB0_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_5: # %b.PHI.1.2
+; MIPS32-NEXT:    lw $1, 24($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB0_6: # %b.PHI.1
+; MIPS32-NEXT:    lw $1, 16($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $2, $zero, 1
+; MIPS32-NEXT:    lw $3, 32($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $2, $3, $2
+; MIPS32-NEXT:    move $4, $1
+; MIPS32-NEXT:    sw $1, 12($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $2, $BB0_8
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.7: # %b.PHI.1
+; MIPS32-NEXT:    j $BB0_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_8: # %b.PHI.1.end
+; MIPS32-NEXT:    lw $1, 12($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($2)
+; MIPS32-NEXT:    addiu $sp, $sp, 48
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_9: # %pre.PHI.2
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 40($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB0_11
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.10: # %pre.PHI.2
+; MIPS32-NEXT:    j $BB0_12
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_11: # %b.PHI.2.0
+; MIPS32-NEXT:    lw $1, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB0_13
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_12: # %b.PHI.2.1
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB0_13: # %b.PHI.2
+; MIPS32-NEXT:    lw $1, 4($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $2, $zero, 1
+; MIPS32-NEXT:    lw $3, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $2, $3, $2
+; MIPS32-NEXT:    move $4, $1
+; MIPS32-NEXT:    sw $1, 0($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $2, $BB0_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.14: # %b.PHI.2.end
+; MIPS32-NEXT:    lw $1, 0($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($2)
+; MIPS32-NEXT:    addiu $sp, $sp, 48
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_15: # %b.PHI.3
+; MIPS32-NEXT:    lw $1, 8($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 8($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $3, $zero, 1
+; MIPS32-NEXT:    lw $4, 32($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $5, $4, $3
+; MIPS32-NEXT:    movn $1, $2, $5
+; MIPS32-NEXT:    lw $5, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $3, $5, $3
+; MIPS32-NEXT:    move $6, $2
+; MIPS32-NEXT:    movn $6, $1, $3
+; MIPS32-NEXT:    lw $1, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $6, 0($1)
+; MIPS32-NEXT:    sw $2, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 48
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+pre.PHI.1:
+  br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+pre.PHI.1.0:
+  br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+b.PHI.1.0:
+  %phi1.0 = load i32, i32* %a
+  br label %b.PHI.1
+
+b.PHI.1.1:
+  %phi1.1 = load i32, i32* %b
+  br label %b.PHI.1
+
+b.PHI.1.2:
+  %phi1.2 = load i32, i32* %c
+  br label %b.PHI.1
+
+b.PHI.1:
+  %phi1 = phi i32 [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+  br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+b.PHI.1.end:
+  store i32 %phi1, i32* %result
+  ret void
+
+pre.PHI.2:
+  br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+b.PHI.2.0:
+  %phi2.0 = load i32, i32* %a
+  br label %b.PHI.2
+
+b.PHI.2.1:
+  %phi2.1 = load i32, i32* %b
+  br label %b.PHI.2
+
+b.PHI.2:
+  %phi2 = phi i32 [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+   br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+b.PHI.2.end:
+  store i32 %phi2, i32* %result
+  ret void
+
+b.PHI.3:
+  %phi3 = phi i32 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %phi4 = phi i32 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %sel_1.2 = select i1 %cnd2, i32 %phi3, i32 %phi4
+  %sel_3_1.2 = select i1 %cnd1, i32 %sel_1.2, i32 %phi3
+  store i32 %sel_3_1.2, i32* %result
+  store i32 %phi3, i32* %result
+  ret void
+
+}
+
+define void @long_chain_i32_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i32* %a, i32* %b, i32* %c, i32* %result) {
+; MIPS32-LABEL: long_chain_i32_in_gpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -56
+; MIPS32-NEXT:    .cfi_def_cfa_offset 56
+; MIPS32-NEXT:    addiu $1, $sp, 72
+; MIPS32-NEXT:    lw $1, 0($1)
+; MIPS32-NEXT:    addiu $2, $sp, 76
+; MIPS32-NEXT:    lw $2, 0($2)
+; MIPS32-NEXT:    addiu $3, $sp, 80
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    ori $8, $zero, 0
+; MIPS32-NEXT:    ori $9, $zero, 1
+; MIPS32-NEXT:    and $9, $4, $9
+; MIPS32-NEXT:    sw $1, 52($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 48($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 44($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 40($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 36($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 32($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $8, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $9, $BB1_9
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %pre.PHI.1
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB1_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.2: # %pre.PHI.1.0
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 40($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB1_5
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.3: # %b.PHI.1.0
+; MIPS32-NEXT:    lw $1, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB1_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_4: # %b.PHI.1.1
+; MIPS32-NEXT:    lw $1, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB1_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_5: # %b.PHI.1.2
+; MIPS32-NEXT:    lw $1, 32($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB1_6: # %b.PHI.1
+; MIPS32-NEXT:    lw $1, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $2, $zero, 1
+; MIPS32-NEXT:    lw $3, 40($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $2, $3, $2
+; MIPS32-NEXT:    move $4, $1
+; MIPS32-NEXT:    lw $5, 24($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 12($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $2, $BB1_8
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.7: # %b.PHI.1
+; MIPS32-NEXT:    j $BB1_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_8: # %b.PHI.1.end
+; MIPS32-NEXT:    lw $1, 16($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($2)
+; MIPS32-NEXT:    addiu $sp, $sp, 56
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_9: # %pre.PHI.2
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 48($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB1_11
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.10: # %pre.PHI.2
+; MIPS32-NEXT:    j $BB1_12
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_11: # %b.PHI.2.0
+; MIPS32-NEXT:    lw $1, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB1_13
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_12: # %b.PHI.2.1
+; MIPS32-NEXT:    lw $1, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB1_13: # %b.PHI.2
+; MIPS32-NEXT:    lw $1, 4($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $2, $zero, 1
+; MIPS32-NEXT:    lw $3, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $2, $3, $2
+; MIPS32-NEXT:    move $4, $1
+; MIPS32-NEXT:    move $5, $1
+; MIPS32-NEXT:    sw $1, 0($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 12($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $2, $BB1_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.14: # %b.PHI.2.end
+; MIPS32-NEXT:    lw $1, 0($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($2)
+; MIPS32-NEXT:    addiu $sp, $sp, 56
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_15: # %b.PHI.3
+; MIPS32-NEXT:    lw $1, 8($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 12($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $3, $zero, 1
+; MIPS32-NEXT:    lw $4, 40($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $5, $4, $3
+; MIPS32-NEXT:    movn $1, $2, $5
+; MIPS32-NEXT:    lw $5, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $3, $5, $3
+; MIPS32-NEXT:    move $6, $2
+; MIPS32-NEXT:    movn $6, $1, $3
+; MIPS32-NEXT:    lw $1, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $6, 0($1)
+; MIPS32-NEXT:    sw $2, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 56
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+pre.PHI.1:
+  br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+pre.PHI.1.0:
+  br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+b.PHI.1.0:
+  %phi1.0 = load i32, i32* %a
+  br label %b.PHI.1
+
+b.PHI.1.1:
+  %phi1.1 = load i32, i32* %b
+  br label %b.PHI.1
+
+b.PHI.1.2:
+  %phi1.2 = load i32, i32* %c
+  br label %b.PHI.1
+
+b.PHI.1:
+  %phi1 = phi i32 [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+  br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+b.PHI.1.end:
+  store i32 %phi1, i32* %result
+  ret void
+
+pre.PHI.2:
+  br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+b.PHI.2.0:
+  %phi2.0 = load i32, i32* %a
+  br label %b.PHI.2
+
+b.PHI.2.1:
+  %phi2.1 = load i32, i32* %b
+  br label %b.PHI.2
+
+b.PHI.2:
+  %phi2 = phi i32 [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+   br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+b.PHI.2.end:
+  store i32 %phi2, i32* %result
+  ret void
+
+b.PHI.3:
+  %phi3 = phi i32 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %phi4 = phi i32 [ %phi2, %b.PHI.2], [ 0, %b.PHI.1 ]
+  %sel_1.2 = select i1 %cnd2, i32 %phi3, i32 %phi4
+  %sel_3_1.2 = select i1 %cnd1, i32 %sel_1.2, i32 %phi3
+  store i32 %sel_3_1.2, i32* %result
+  store i32 %phi3, i32* %result
+  ret void
+}
+
+define void @long_chain_ambiguous_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, float* %a, float* %b, float* %c, float* %result) {
+; MIPS32-LABEL: long_chain_ambiguous_float_in_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -48
+; MIPS32-NEXT:    .cfi_def_cfa_offset 48
+; MIPS32-NEXT:    addiu $1, $sp, 64
+; MIPS32-NEXT:    lw $1, 0($1)
+; MIPS32-NEXT:    addiu $2, $sp, 68
+; MIPS32-NEXT:    lw $2, 0($2)
+; MIPS32-NEXT:    addiu $3, $sp, 72
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    ori $8, $zero, 1
+; MIPS32-NEXT:    and $8, $4, $8
+; MIPS32-NEXT:    sw $1, 44($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 40($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 36($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 32($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $8, $BB2_9
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %pre.PHI.1
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB2_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.2: # %pre.PHI.1.0
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 32($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB2_5
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.3: # %b.PHI.1.0
+; MIPS32-NEXT:    lw $1, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB2_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_4: # %b.PHI.1.1
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB2_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_5: # %b.PHI.1.2
+; MIPS32-NEXT:    lw $1, 24($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB2_6: # %b.PHI.1
+; MIPS32-NEXT:    lw $1, 16($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $2, $zero, 1
+; MIPS32-NEXT:    lw $3, 32($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $2, $3, $2
+; MIPS32-NEXT:    move $4, $1
+; MIPS32-NEXT:    sw $1, 12($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $2, $BB2_8
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.7: # %b.PHI.1
+; MIPS32-NEXT:    j $BB2_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_8: # %b.PHI.1.end
+; MIPS32-NEXT:    lw $1, 12($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($2)
+; MIPS32-NEXT:    addiu $sp, $sp, 48
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_9: # %pre.PHI.2
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 40($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB2_11
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.10: # %pre.PHI.2
+; MIPS32-NEXT:    j $BB2_12
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_11: # %b.PHI.2.0
+; MIPS32-NEXT:    lw $1, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB2_13
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_12: # %b.PHI.2.1
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    sw $2, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB2_13: # %b.PHI.2
+; MIPS32-NEXT:    lw $1, 4($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $2, $zero, 1
+; MIPS32-NEXT:    lw $3, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $2, $3, $2
+; MIPS32-NEXT:    move $4, $1
+; MIPS32-NEXT:    sw $1, 0($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $2, $BB2_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.14: # %b.PHI.2.end
+; MIPS32-NEXT:    lw $1, 0($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($2)
+; MIPS32-NEXT:    addiu $sp, $sp, 48
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_15: # %b.PHI.3
+; MIPS32-NEXT:    lw $1, 8($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 8($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $3, $zero, 1
+; MIPS32-NEXT:    lw $4, 32($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $5, $4, $3
+; MIPS32-NEXT:    movn $1, $2, $5
+; MIPS32-NEXT:    lw $5, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $3, $5, $3
+; MIPS32-NEXT:    move $6, $2
+; MIPS32-NEXT:    movn $6, $1, $3
+; MIPS32-NEXT:    lw $1, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $6, 0($1)
+; MIPS32-NEXT:    sw $2, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 48
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+pre.PHI.1:
+  br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+pre.PHI.1.0:
+  br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+b.PHI.1.0:
+  %phi1.0 = load float, float* %a
+  br label %b.PHI.1
+
+b.PHI.1.1:
+  %phi1.1 = load float, float* %b
+  br label %b.PHI.1
+
+b.PHI.1.2:
+  %phi1.2 = load float, float* %c
+  br label %b.PHI.1
+
+b.PHI.1:
+  %phi1 = phi float [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+  br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+b.PHI.1.end:
+  store float %phi1, float* %result
+  ret void
+
+pre.PHI.2:
+  br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+b.PHI.2.0:
+  %phi2.0 = load float, float* %a
+  br label %b.PHI.2
+
+b.PHI.2.1:
+  %phi2.1 = load float, float* %b
+  br label %b.PHI.2
+
+b.PHI.2:
+  %phi2 = phi float [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+   br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+b.PHI.2.end:
+  store float %phi2, float* %result
+  ret void
+
+b.PHI.3:
+  %phi3 = phi float [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %phi4 = phi float [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %sel_1.2 = select i1 %cnd2, float %phi3, float %phi4
+  %sel_3_1.2 = select i1 %cnd1, float %sel_1.2, float %phi3
+  store float %sel_3_1.2, float* %result
+  store float %phi3, float* %result
+  ret void
+}
+
+
+define void @long_chain_float_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, float* %a, float* %b, float* %c, float* %result) {
+; MIPS32-LABEL: long_chain_float_in_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -56
+; MIPS32-NEXT:    .cfi_def_cfa_offset 56
+; MIPS32-NEXT:    addiu $1, $sp, 72
+; MIPS32-NEXT:    lw $1, 0($1)
+; MIPS32-NEXT:    addiu $2, $sp, 76
+; MIPS32-NEXT:    lw $2, 0($2)
+; MIPS32-NEXT:    addiu $3, $sp, 80
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    ori $8, $zero, 0
+; MIPS32-NEXT:    mtc1 $8, $f0
+; MIPS32-NEXT:    ori $8, $zero, 1
+; MIPS32-NEXT:    and $8, $4, $8
+; MIPS32-NEXT:    sw $1, 52($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 48($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 44($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 40($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 36($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 32($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    swc1 $f0, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $8, $BB3_9
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %pre.PHI.1
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB3_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.2: # %pre.PHI.1.0
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 40($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB3_5
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.3: # %b.PHI.1.0
+; MIPS32-NEXT:    lw $1, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lwc1 $f0, 0($1)
+; MIPS32-NEXT:    swc1 $f0, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB3_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_4: # %b.PHI.1.1
+; MIPS32-NEXT:    lw $1, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lwc1 $f0, 0($1)
+; MIPS32-NEXT:    swc1 $f0, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB3_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_5: # %b.PHI.1.2
+; MIPS32-NEXT:    lw $1, 32($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lwc1 $f0, 0($1)
+; MIPS32-NEXT:    swc1 $f0, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB3_6: # %b.PHI.1
+; MIPS32-NEXT:    lwc1 $f0, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 40($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    mov.s $f1, $f0
+; MIPS32-NEXT:    lwc1 $f2, 24($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    swc1 $f0, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    swc1 $f1, 12($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    swc1 $f2, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $1, $BB3_8
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.7: # %b.PHI.1
+; MIPS32-NEXT:    j $BB3_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_8: # %b.PHI.1.end
+; MIPS32-NEXT:    lwc1 $f0, 16($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $1, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    swc1 $f0, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 56
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_9: # %pre.PHI.2
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 48($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB3_11
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.10: # %pre.PHI.2
+; MIPS32-NEXT:    j $BB3_12
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_11: # %b.PHI.2.0
+; MIPS32-NEXT:    lw $1, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lwc1 $f0, 0($1)
+; MIPS32-NEXT:    swc1 $f0, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB3_13
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_12: # %b.PHI.2.1
+; MIPS32-NEXT:    lw $1, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lwc1 $f0, 0($1)
+; MIPS32-NEXT:    swc1 $f0, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB3_13: # %b.PHI.2
+; MIPS32-NEXT:    lwc1 $f0, 4($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    mov.s $f1, $f0
+; MIPS32-NEXT:    mov.s $f2, $f0
+; MIPS32-NEXT:    swc1 $f0, 0($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    swc1 $f1, 12($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    swc1 $f2, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $1, $BB3_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.14: # %b.PHI.2.end
+; MIPS32-NEXT:    lwc1 $f0, 0($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $1, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    swc1 $f0, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 56
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_15: # %b.PHI.3
+; MIPS32-NEXT:    lwc1 $f0, 8($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lwc1 $f1, 12($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 40($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $3, $2, $1
+; MIPS32-NEXT:    movn.s $f0, $f1, $3
+; MIPS32-NEXT:    lw $3, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $3, $1
+; MIPS32-NEXT:    mov.s $f2, $f1
+; MIPS32-NEXT:    movn.s $f2, $f0, $1
+; MIPS32-NEXT:    lw $1, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    swc1 $f2, 0($1)
+; MIPS32-NEXT:    swc1 $f1, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 56
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+pre.PHI.1:
+  br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+pre.PHI.1.0:
+  br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+b.PHI.1.0:
+  %phi1.0 = load float, float* %a
+  br label %b.PHI.1
+
+b.PHI.1.1:
+  %phi1.1 = load float, float* %b
+  br label %b.PHI.1
+
+b.PHI.1.2:
+  %phi1.2 = load float, float* %c
+  br label %b.PHI.1
+
+b.PHI.1:
+  %phi1 = phi float [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+  br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+b.PHI.1.end:
+  store float %phi1, float* %result
+  ret void
+
+pre.PHI.2:
+  br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+b.PHI.2.0:
+  %phi2.0 = load float, float* %a
+  br label %b.PHI.2
+
+b.PHI.2.1:
+  %phi2.1 = load float, float* %b
+  br label %b.PHI.2
+
+b.PHI.2:
+  %phi2 = phi float [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+   br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+b.PHI.2.end:
+  store float %phi2, float* %result
+  ret void
+
+b.PHI.3:
+  %phi3 = phi float [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %phi4 = phi float [ %phi2, %b.PHI.2], [ 0.0, %b.PHI.1 ]
+  %sel_1.2 = select i1 %cnd2, float %phi3, float %phi4
+  %sel_3_1.2 = select i1 %cnd1, float %sel_1.2, float %phi3
+  store float %sel_3_1.2, float* %result
+  store float %phi3, float* %result
+  ret void
+}
+

Added: llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll?rev=365743&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/long_ambiguous_chain_s64.ll Thu Jul 11 02:22:49 2019
@@ -0,0 +1,795 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc  -O0 -mtriple=mipsel-linux-gnu -global-isel  -verify-machineinstrs %s -o -| FileCheck %s -check-prefixes=MIPS32
+
+define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+; MIPS32-LABEL: long_chain_ambiguous_i64_in_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -72
+; MIPS32-NEXT:    .cfi_def_cfa_offset 72
+; MIPS32-NEXT:    addiu $1, $sp, 88
+; MIPS32-NEXT:    lw $1, 0($1)
+; MIPS32-NEXT:    addiu $2, $sp, 92
+; MIPS32-NEXT:    lw $2, 0($2)
+; MIPS32-NEXT:    addiu $3, $sp, 96
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    ori $8, $zero, 1
+; MIPS32-NEXT:    and $8, $4, $8
+; MIPS32-NEXT:    sw $1, 68($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 64($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 60($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 56($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 52($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 48($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 44($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $8, $BB0_9
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %pre.PHI.1
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB0_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.2: # %pre.PHI.1.0
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 56($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB0_5
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.3: # %b.PHI.1.0
+; MIPS32-NEXT:    lw $1, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 32($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB0_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_4: # %b.PHI.1.1
+; MIPS32-NEXT:    lw $1, 68($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 32($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB0_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_5: # %b.PHI.1.2
+; MIPS32-NEXT:    lw $1, 48($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 32($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:  $BB0_6: # %b.PHI.1
+; MIPS32-NEXT:    ldc1 $f0, 32($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 56($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    mov.d $f2, $f0
+; MIPS32-NEXT:    sdc1 $f0, 24($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f2, 16($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    bnez $1, $BB0_8
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.7: # %b.PHI.1
+; MIPS32-NEXT:    j $BB0_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_8: # %b.PHI.1.end
+; MIPS32-NEXT:    ldc1 $f0, 24($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 72
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_9: # %pre.PHI.2
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 64($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB0_11
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.10: # %pre.PHI.2
+; MIPS32-NEXT:    j $BB0_12
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_11: # %b.PHI.2.0
+; MIPS32-NEXT:    lw $1, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 8($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB0_13
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_12: # %b.PHI.2.1
+; MIPS32-NEXT:    lw $1, 68($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 8($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:  $BB0_13: # %b.PHI.2
+; MIPS32-NEXT:    ldc1 $f0, 8($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    mov.d $f2, $f0
+; MIPS32-NEXT:    sdc1 $f0, 0($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f2, 16($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    bnez $1, $BB0_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.14: # %b.PHI.2.end
+; MIPS32-NEXT:    ldc1 $f0, 0($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 72
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB0_15: # %b.PHI.3
+; MIPS32-NEXT:    ldc1 $f0, 16($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f2, 16($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 56($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $3, $2, $1
+; MIPS32-NEXT:    movn.d $f0, $f2, $3
+; MIPS32-NEXT:    lw $3, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $3, $1
+; MIPS32-NEXT:    mov.d $f4, $f2
+; MIPS32-NEXT:    movn.d $f4, $f0, $1
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f4, 0($1)
+; MIPS32-NEXT:    sdc1 $f2, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 72
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+pre.PHI.1:
+  br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+pre.PHI.1.0:
+  br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+b.PHI.1.0:
+  %phi1.0 = load i64, i64* %a
+  br label %b.PHI.1
+
+b.PHI.1.1:
+  %phi1.1 = load i64, i64* %b
+  br label %b.PHI.1
+
+b.PHI.1.2:
+  %phi1.2 = load i64, i64* %c
+  br label %b.PHI.1
+
+b.PHI.1:
+  %phi1 = phi i64 [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+  br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+b.PHI.1.end:
+  store i64 %phi1, i64* %result
+  ret void
+
+pre.PHI.2:
+  br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+b.PHI.2.0:
+  %phi2.0 = load i64, i64* %a
+  br label %b.PHI.2
+
+b.PHI.2.1:
+  %phi2.1 = load i64, i64* %b
+  br label %b.PHI.2
+
+b.PHI.2:
+  %phi2 = phi i64 [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+   br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+b.PHI.2.end:
+  store i64 %phi2, i64* %result
+  ret void
+
+b.PHI.3:
+  %phi3 = phi i64 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %phi4 = phi i64 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
+  %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
+  store i64 %sel_3_1.2, i64* %result
+  store i64 %phi3, i64* %result
+  ret void
+
+}
+
+define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+; MIPS32-LABEL: long_chain_i64_in_gpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -80
+; MIPS32-NEXT:    .cfi_def_cfa_offset 80
+; MIPS32-NEXT:    addiu $1, $sp, 96
+; MIPS32-NEXT:    lw $1, 0($1)
+; MIPS32-NEXT:    addiu $2, $sp, 100
+; MIPS32-NEXT:    lw $2, 0($2)
+; MIPS32-NEXT:    addiu $3, $sp, 104
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    ori $8, $zero, 0
+; MIPS32-NEXT:    ori $9, $zero, 1
+; MIPS32-NEXT:    and $9, $4, $9
+; MIPS32-NEXT:    sw $1, 76($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 72($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 68($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 64($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 60($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 56($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 52($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $8, 48($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $9, $BB1_9
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %pre.PHI.1
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 68($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB1_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.2: # %pre.PHI.1.0
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 64($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB1_5
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.3: # %b.PHI.1.0
+; MIPS32-NEXT:    lw $1, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    ori $3, $zero, 4
+; MIPS32-NEXT:    addu $3, $1, $3
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    sw $2, 44($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 40($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB1_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_4: # %b.PHI.1.1
+; MIPS32-NEXT:    lw $1, 76($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    ori $3, $zero, 4
+; MIPS32-NEXT:    addu $3, $1, $3
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    sw $2, 44($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 40($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB1_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_5: # %b.PHI.1.2
+; MIPS32-NEXT:    lw $1, 56($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    ori $3, $zero, 4
+; MIPS32-NEXT:    addu $3, $1, $3
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    sw $2, 44($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 40($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB1_6: # %b.PHI.1
+; MIPS32-NEXT:    lw $1, 40($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $3, $zero, 1
+; MIPS32-NEXT:    lw $4, 64($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $3, $4, $3
+; MIPS32-NEXT:    move $5, $2
+; MIPS32-NEXT:    move $6, $1
+; MIPS32-NEXT:    lw $7, 48($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $8, 48($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 36($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 32($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $8, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $3, $BB1_8
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.7: # %b.PHI.1
+; MIPS32-NEXT:    j $BB1_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_8: # %b.PHI.1.end
+; MIPS32-NEXT:    lw $1, 32($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($2)
+; MIPS32-NEXT:    ori $3, $zero, 4
+; MIPS32-NEXT:    addu $3, $2, $3
+; MIPS32-NEXT:    lw $4, 36($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $4, 0($3)
+; MIPS32-NEXT:    addiu $sp, $sp, 80
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_9: # %pre.PHI.2
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 72($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB1_11
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.10: # %pre.PHI.2
+; MIPS32-NEXT:    j $BB1_12
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_11: # %b.PHI.2.0
+; MIPS32-NEXT:    lw $1, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    ori $3, $zero, 4
+; MIPS32-NEXT:    addu $3, $1, $3
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    sw $2, 12($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB1_13
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_12: # %b.PHI.2.1
+; MIPS32-NEXT:    lw $1, 76($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 0($1)
+; MIPS32-NEXT:    ori $3, $zero, 4
+; MIPS32-NEXT:    addu $3, $1, $3
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    sw $2, 12($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB1_13: # %b.PHI.2
+; MIPS32-NEXT:    lw $1, 8($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 12($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $3, $zero, 1
+; MIPS32-NEXT:    lw $4, 68($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $3, $4, $3
+; MIPS32-NEXT:    move $5, $2
+; MIPS32-NEXT:    move $6, $1
+; MIPS32-NEXT:    move $7, $2
+; MIPS32-NEXT:    move $8, $1
+; MIPS32-NEXT:    sw $1, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 0($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 24($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 20($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $8, 16($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $3, $BB1_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.14: # %b.PHI.2.end
+; MIPS32-NEXT:    lw $1, 0($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($2)
+; MIPS32-NEXT:    ori $3, $zero, 4
+; MIPS32-NEXT:    addu $3, $2, $3
+; MIPS32-NEXT:    lw $4, 4($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $4, 0($3)
+; MIPS32-NEXT:    addiu $sp, $sp, 80
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB1_15: # %b.PHI.3
+; MIPS32-NEXT:    lw $1, 16($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 20($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $3, 24($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $4, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ori $5, $zero, 1
+; MIPS32-NEXT:    lw $6, 64($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $7, $6, $5
+; MIPS32-NEXT:    movn $2, $4, $7
+; MIPS32-NEXT:    movn $1, $3, $7
+; MIPS32-NEXT:    lw $7, 68($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $5, $7, $5
+; MIPS32-NEXT:    move $8, $4
+; MIPS32-NEXT:    movn $8, $2, $5
+; MIPS32-NEXT:    move $2, $3
+; MIPS32-NEXT:    movn $2, $1, $5
+; MIPS32-NEXT:    lw $1, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $8, 0($1)
+; MIPS32-NEXT:    ori $5, $zero, 4
+; MIPS32-NEXT:    addu $5, $1, $5
+; MIPS32-NEXT:    sw $2, 0($5)
+; MIPS32-NEXT:    sw $4, 0($1)
+; MIPS32-NEXT:    ori $2, $zero, 4
+; MIPS32-NEXT:    addu $2, $1, $2
+; MIPS32-NEXT:    sw $3, 0($2)
+; MIPS32-NEXT:    addiu $sp, $sp, 80
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+pre.PHI.1:
+  br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+pre.PHI.1.0:
+  br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+b.PHI.1.0:
+  %phi1.0 = load i64, i64* %a
+  br label %b.PHI.1
+
+b.PHI.1.1:
+  %phi1.1 = load i64, i64* %b
+  br label %b.PHI.1
+
+b.PHI.1.2:
+  %phi1.2 = load i64, i64* %c
+  br label %b.PHI.1
+
+b.PHI.1:
+  %phi1 = phi i64 [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+  br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+b.PHI.1.end:
+  store i64 %phi1, i64* %result
+  ret void
+
+pre.PHI.2:
+  br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+b.PHI.2.0:
+  %phi2.0 = load i64, i64* %a
+  br label %b.PHI.2
+
+b.PHI.2.1:
+  %phi2.1 = load i64, i64* %b
+  br label %b.PHI.2
+
+b.PHI.2:
+  %phi2 = phi i64 [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+   br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+b.PHI.2.end:
+  store i64 %phi2, i64* %result
+  ret void
+
+b.PHI.3:
+  %phi3 = phi i64 [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %phi4 = phi i64 [ %phi2, %b.PHI.2], [ 0, %b.PHI.1 ]
+  %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
+  %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
+  store i64 %sel_3_1.2, i64* %result
+  store i64 %phi3, i64* %result
+  ret void
+}
+
+define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+; MIPS32-LABEL: long_chain_ambiguous_double_in_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -72
+; MIPS32-NEXT:    .cfi_def_cfa_offset 72
+; MIPS32-NEXT:    addiu $1, $sp, 88
+; MIPS32-NEXT:    lw $1, 0($1)
+; MIPS32-NEXT:    addiu $2, $sp, 92
+; MIPS32-NEXT:    lw $2, 0($2)
+; MIPS32-NEXT:    addiu $3, $sp, 96
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    ori $8, $zero, 1
+; MIPS32-NEXT:    and $8, $4, $8
+; MIPS32-NEXT:    sw $1, 68($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 64($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 60($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 56($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 52($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 48($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 44($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $8, $BB2_9
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %pre.PHI.1
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB2_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.2: # %pre.PHI.1.0
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 56($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB2_5
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.3: # %b.PHI.1.0
+; MIPS32-NEXT:    lw $1, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 32($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB2_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_4: # %b.PHI.1.1
+; MIPS32-NEXT:    lw $1, 68($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 32($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB2_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_5: # %b.PHI.1.2
+; MIPS32-NEXT:    lw $1, 48($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 32($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:  $BB2_6: # %b.PHI.1
+; MIPS32-NEXT:    ldc1 $f0, 32($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 56($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    mov.d $f2, $f0
+; MIPS32-NEXT:    sdc1 $f0, 24($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f2, 16($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    bnez $1, $BB2_8
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.7: # %b.PHI.1
+; MIPS32-NEXT:    j $BB2_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_8: # %b.PHI.1.end
+; MIPS32-NEXT:    ldc1 $f0, 24($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 72
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_9: # %pre.PHI.2
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 64($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB2_11
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.10: # %pre.PHI.2
+; MIPS32-NEXT:    j $BB2_12
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_11: # %b.PHI.2.0
+; MIPS32-NEXT:    lw $1, 52($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 8($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB2_13
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_12: # %b.PHI.2.1
+; MIPS32-NEXT:    lw $1, 68($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 8($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:  $BB2_13: # %b.PHI.2
+; MIPS32-NEXT:    ldc1 $f0, 8($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    mov.d $f2, $f0
+; MIPS32-NEXT:    sdc1 $f0, 0($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f2, 16($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    bnez $1, $BB2_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.14: # %b.PHI.2.end
+; MIPS32-NEXT:    ldc1 $f0, 0($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 72
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB2_15: # %b.PHI.3
+; MIPS32-NEXT:    ldc1 $f0, 16($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f2, 16($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 56($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $3, $2, $1
+; MIPS32-NEXT:    movn.d $f0, $f2, $3
+; MIPS32-NEXT:    lw $3, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $3, $1
+; MIPS32-NEXT:    mov.d $f4, $f2
+; MIPS32-NEXT:    movn.d $f4, $f0, $1
+; MIPS32-NEXT:    lw $1, 44($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f4, 0($1)
+; MIPS32-NEXT:    sdc1 $f2, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 72
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+pre.PHI.1:
+  br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+pre.PHI.1.0:
+  br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+b.PHI.1.0:
+  %phi1.0 = load double, double* %a
+  br label %b.PHI.1
+
+b.PHI.1.1:
+  %phi1.1 = load double, double* %b
+  br label %b.PHI.1
+
+b.PHI.1.2:
+  %phi1.2 = load double, double* %c
+  br label %b.PHI.1
+
+b.PHI.1:
+  %phi1 = phi double [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+  br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+b.PHI.1.end:
+  store double %phi1, double* %result
+  ret void
+
+pre.PHI.2:
+  br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+b.PHI.2.0:
+  %phi2.0 = load double, double* %a
+  br label %b.PHI.2
+
+b.PHI.2.1:
+  %phi2.1 = load double, double* %b
+  br label %b.PHI.2
+
+b.PHI.2:
+  %phi2 = phi double [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+  br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+b.PHI.2.end:
+  store double %phi2, double* %result
+  ret void
+
+b.PHI.3:
+  %phi3 = phi double [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %phi4 = phi double [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
+  %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
+  store double %sel_3_1.2, double* %result
+  store double %phi3, double* %result
+  ret void
+}
+
+
+define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+; MIPS32-LABEL: long_chain_double_in_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -88
+; MIPS32-NEXT:    .cfi_def_cfa_offset 88
+; MIPS32-NEXT:    addiu $1, $sp, 104
+; MIPS32-NEXT:    lw $1, 0($1)
+; MIPS32-NEXT:    addiu $2, $sp, 108
+; MIPS32-NEXT:    lw $2, 0($2)
+; MIPS32-NEXT:    addiu $3, $sp, 112
+; MIPS32-NEXT:    lw $3, 0($3)
+; MIPS32-NEXT:    ori $8, $zero, 0
+; MIPS32-NEXT:    ori $9, $zero, 0
+; MIPS32-NEXT:    mtc1 $9, $f0
+; MIPS32-NEXT:    mtc1 $8, $f1
+; MIPS32-NEXT:    ori $8, $zero, 1
+; MIPS32-NEXT:    and $8, $4, $8
+; MIPS32-NEXT:    sw $1, 84($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $4, 80($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $5, 76($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $6, 72($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 68($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 64($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $3, 60($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f0, 48($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    bnez $8, $BB3_9
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %pre.PHI.1
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 76($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB3_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.2: # %pre.PHI.1.0
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 72($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB3_5
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.3: # %b.PHI.1.0
+; MIPS32-NEXT:    lw $1, 68($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 40($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB3_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_4: # %b.PHI.1.1
+; MIPS32-NEXT:    lw $1, 84($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 40($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB3_6
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_5: # %b.PHI.1.2
+; MIPS32-NEXT:    lw $1, 64($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 40($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:  $BB3_6: # %b.PHI.1
+; MIPS32-NEXT:    ldc1 $f0, 40($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 72($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    mov.d $f2, $f0
+; MIPS32-NEXT:    ldc1 $f4, 48($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 32($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f2, 24($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f4, 16($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    bnez $1, $BB3_8
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.7: # %b.PHI.1
+; MIPS32-NEXT:    j $BB3_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_8: # %b.PHI.1.end
+; MIPS32-NEXT:    ldc1 $f0, 32($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    lw $1, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 88
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_9: # %pre.PHI.2
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 80($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    bnez $1, $BB3_11
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.10: # %pre.PHI.2
+; MIPS32-NEXT:    j $BB3_12
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_11: # %b.PHI.2.0
+; MIPS32-NEXT:    lw $1, 68($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 8($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB3_13
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_12: # %b.PHI.2.1
+; MIPS32-NEXT:    lw $1, 84($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f0, 0($1)
+; MIPS32-NEXT:    sdc1 $f0, 8($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:  $BB3_13: # %b.PHI.2
+; MIPS32-NEXT:    ldc1 $f0, 8($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 76($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $2, $1
+; MIPS32-NEXT:    mov.d $f2, $f0
+; MIPS32-NEXT:    mov.d $f4, $f0
+; MIPS32-NEXT:    sdc1 $f0, 0($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f2, 24($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f4, 16($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    bnez $1, $BB3_15
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.14: # %b.PHI.2.end
+; MIPS32-NEXT:    ldc1 $f0, 0($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    lw $1, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 88
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB3_15: # %b.PHI.3
+; MIPS32-NEXT:    ldc1 $f0, 16($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ldc1 $f2, 24($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    lw $2, 72($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $3, $2, $1
+; MIPS32-NEXT:    movn.d $f0, $f2, $3
+; MIPS32-NEXT:    lw $3, 76($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    and $1, $3, $1
+; MIPS32-NEXT:    mov.d $f4, $f2
+; MIPS32-NEXT:    movn.d $f4, $f0, $1
+; MIPS32-NEXT:    lw $1, 60($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f4, 0($1)
+; MIPS32-NEXT:    sdc1 $f2, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 88
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+pre.PHI.1:
+  br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+pre.PHI.1.0:
+  br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+b.PHI.1.0:
+  %phi1.0 = load double, double* %a
+  br label %b.PHI.1
+
+b.PHI.1.1:
+  %phi1.1 = load double, double* %b
+  br label %b.PHI.1
+
+b.PHI.1.2:
+  %phi1.2 = load double, double* %c
+  br label %b.PHI.1
+
+b.PHI.1:
+  %phi1 = phi double [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+  br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+b.PHI.1.end:
+  store double %phi1, double* %result
+  ret void
+
+pre.PHI.2:
+  br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+b.PHI.2.0:
+  %phi2.0 = load double, double* %a
+  br label %b.PHI.2
+
+b.PHI.2.1:
+  %phi2.1 = load double, double* %b
+  br label %b.PHI.2
+
+b.PHI.2:
+  %phi2 = phi double [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+  br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+b.PHI.2.end:
+  store double %phi2, double* %result
+  ret void
+
+b.PHI.3:
+  %phi3 = phi double [ %phi2, %b.PHI.2], [ %phi1, %b.PHI.1 ]
+  %phi4 = phi double [ %phi2, %b.PHI.2], [ 0.0, %b.PHI.1 ]
+  %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
+  %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
+  store double %sel_3_1.2, double* %result
+  store double %phi3, double* %result
+  ret void
+}
+

Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll?rev=365743&r1=365742&r2=365743&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/phi.ll Thu Jul 11 02:22:49 2019
@@ -222,6 +222,55 @@ cond.end:
   ret i64 %cond
 }
 
+define void @phi_ambiguous_i64_in_fpr(i1 %cnd, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {
+; MIPS32-LABEL: phi_ambiguous_i64_in_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -32
+; MIPS32-NEXT:    .cfi_def_cfa_offset 32
+; MIPS32-NEXT:    ldc1 $f0, 0($5)
+; MIPS32-NEXT:    ldc1 $f2, 0($6)
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    and $1, $4, $1
+; MIPS32-NEXT:    sw $7, 28($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f0, 16($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    sdc1 $f2, 8($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    bnez $1, $BB5_2
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %entry
+; MIPS32-NEXT:    j $BB5_3
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB5_2: # %cond.true
+; MIPS32-NEXT:    ldc1 $f0, 16($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 0($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:    j $BB5_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB5_3: # %cond.false
+; MIPS32-NEXT:    ldc1 $f0, 8($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 0($sp) # 8-byte Folded Spill
+; MIPS32-NEXT:  $BB5_4: # %cond.end
+; MIPS32-NEXT:    ldc1 $f0, 0($sp) # 8-byte Folded Reload
+; MIPS32-NEXT:    lw $1, 28($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sdc1 $f0, 0($1)
+; MIPS32-NEXT:    addiu $sp, $sp, 32
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load i64, i64* %i64_ptr_a, align 4
+  %1 = load i64, i64* %i64_ptr_b, align 4
+  br i1 %cnd, label %cond.true, label %cond.false
+
+cond.true:
+  br label %cond.end
+
+cond.false:
+  br label %cond.end
+
+cond.end:
+  %cond = phi i64 [ %0, %cond.true ], [ %1, %cond.false ]
+  store i64 %cond, i64* %i64_ptr_c, align 4
+  ret void
+}
+
 define float @phi_float(i1 %cnd, float %a, float %b) {
 ; MIPS32-LABEL: phi_float:
 ; MIPS32:       # %bb.0: # %entry
@@ -233,20 +282,20 @@ define float @phi_float(i1 %cnd, float %
 ; MIPS32-NEXT:    and $1, $4, $1
 ; MIPS32-NEXT:    swc1 $f0, 12($sp) # 4-byte Folded Spill
 ; MIPS32-NEXT:    swc1 $f1, 8($sp) # 4-byte Folded Spill
-; MIPS32-NEXT:    bnez $1, $BB5_2
+; MIPS32-NEXT:    bnez $1, $BB6_2
 ; MIPS32-NEXT:    nop
 ; MIPS32-NEXT:  # %bb.1: # %entry
-; MIPS32-NEXT:    j $BB5_3
+; MIPS32-NEXT:    j $BB6_3
 ; MIPS32-NEXT:    nop
-; MIPS32-NEXT:  $BB5_2: # %cond.true
+; MIPS32-NEXT:  $BB6_2: # %cond.true
 ; MIPS32-NEXT:    lwc1 $f0, 12($sp) # 4-byte Folded Reload
 ; MIPS32-NEXT:    swc1 $f0, 4($sp) # 4-byte Folded Spill
-; MIPS32-NEXT:    j $BB5_4
+; MIPS32-NEXT:    j $BB6_4
 ; MIPS32-NEXT:    nop
-; MIPS32-NEXT:  $BB5_3: # %cond.false
+; MIPS32-NEXT:  $BB6_3: # %cond.false
 ; MIPS32-NEXT:    lwc1 $f0, 8($sp) # 4-byte Folded Reload
 ; MIPS32-NEXT:    swc1 $f0, 4($sp) # 4-byte Folded Spill
-; MIPS32-NEXT:  $BB5_4: # %cond.end
+; MIPS32-NEXT:  $BB6_4: # %cond.end
 ; MIPS32-NEXT:    lwc1 $f0, 4($sp) # 4-byte Folded Reload
 ; MIPS32-NEXT:    addiu $sp, $sp, 16
 ; MIPS32-NEXT:    jr $ra
@@ -265,6 +314,55 @@ cond.end:
   ret float %cond
 }
 
+define void @phi_ambiguous_float_in_gpr(i1 %cnd, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {
+; MIPS32-LABEL: phi_ambiguous_float_in_gpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    addiu $sp, $sp, -16
+; MIPS32-NEXT:    .cfi_def_cfa_offset 16
+; MIPS32-NEXT:    lw $1, 0($5)
+; MIPS32-NEXT:    lw $2, 0($6)
+; MIPS32-NEXT:    ori $3, $zero, 1
+; MIPS32-NEXT:    and $3, $4, $3
+; MIPS32-NEXT:    sw $1, 12($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $7, 8($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    sw $2, 4($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    bnez $3, $BB7_2
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  # %bb.1: # %entry
+; MIPS32-NEXT:    j $BB7_3
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB7_2: # %cond.true
+; MIPS32-NEXT:    lw $1, 12($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:    j $BB7_4
+; MIPS32-NEXT:    nop
+; MIPS32-NEXT:  $BB7_3: # %cond.false
+; MIPS32-NEXT:    lw $1, 4($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($sp) # 4-byte Folded Spill
+; MIPS32-NEXT:  $BB7_4: # %cond.end
+; MIPS32-NEXT:    lw $1, 0($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    lw $2, 8($sp) # 4-byte Folded Reload
+; MIPS32-NEXT:    sw $1, 0($2)
+; MIPS32-NEXT:    addiu $sp, $sp, 16
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load float, float* %f32_ptr_a, align 4
+  %1 = load float, float* %f32_ptr_b, align 4
+  br i1 %cnd, label %cond.true, label %cond.false
+
+cond.true:
+  br label %cond.end
+
+cond.false:
+  br label %cond.end
+
+cond.end:
+  %cond = phi float [ %0, %cond.true ], [ %1, %cond.false ]
+  store float %cond, float* %f32_ptr_c, align 4
+  ret void
+}
+
 define double @phi_double(double %a, double %b, i1 %cnd) {
 ; MIPS32-LABEL: phi_double:
 ; MIPS32:       # %bb.0: # %entry
@@ -276,20 +374,20 @@ define double @phi_double(double %a, dou
 ; MIPS32-NEXT:    and $1, $1, $2
 ; MIPS32-NEXT:    sdc1 $f12, 16($sp) # 8-byte Folded Spill
 ; MIPS32-NEXT:    sdc1 $f14, 8($sp) # 8-byte Folded Spill
-; MIPS32-NEXT:    bnez $1, $BB6_2
+; MIPS32-NEXT:    bnez $1, $BB8_2
 ; MIPS32-NEXT:    nop
 ; MIPS32-NEXT:  # %bb.1: # %entry
-; MIPS32-NEXT:    j $BB6_3
+; MIPS32-NEXT:    j $BB8_3
 ; MIPS32-NEXT:    nop
-; MIPS32-NEXT:  $BB6_2: # %cond.true
+; MIPS32-NEXT:  $BB8_2: # %cond.true
 ; MIPS32-NEXT:    ldc1 $f0, 16($sp) # 8-byte Folded Reload
 ; MIPS32-NEXT:    sdc1 $f0, 0($sp) # 8-byte Folded Spill
-; MIPS32-NEXT:    j $BB6_4
+; MIPS32-NEXT:    j $BB8_4
 ; MIPS32-NEXT:    nop
-; MIPS32-NEXT:  $BB6_3: # %cond.false
+; MIPS32-NEXT:  $BB8_3: # %cond.false
 ; MIPS32-NEXT:    ldc1 $f0, 8($sp) # 8-byte Folded Reload
 ; MIPS32-NEXT:    sdc1 $f0, 0($sp) # 8-byte Folded Spill
-; MIPS32-NEXT:  $BB6_4: # %cond.end
+; MIPS32-NEXT:  $BB8_4: # %cond.end
 ; MIPS32-NEXT:    ldc1 $f0, 0($sp) # 8-byte Folded Reload
 ; MIPS32-NEXT:    addiu $sp, $sp, 24
 ; MIPS32-NEXT:    jr $ra

Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll?rev=365743&r1=365742&r2=365743&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/select.ll Thu Jul 11 02:22:49 2019
@@ -99,6 +99,25 @@ entry:
   ret i64 %cond
 }
 
+define void @select_ambiguous_i64_in_fpr(i1 %test, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {
+; MIPS32-LABEL: select_ambiguous_i64_in_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    ldc1 $f0, 0($5)
+; MIPS32-NEXT:    ldc1 $f2, 0($6)
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    and $1, $4, $1
+; MIPS32-NEXT:    movn.d $f2, $f0, $1
+; MIPS32-NEXT:    sdc1 $f2, 0($7)
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load i64, i64* %i64_ptr_a, align 8
+  %1 = load i64, i64* %i64_ptr_b, align 8
+  %cond = select i1 %test, i64 %0, i64 %1
+  store i64 %cond, i64* %i64_ptr_c, align 8
+  ret void
+}
+
 define float @select_float(i1 %test, float %a, float %b) {
 ; MIPS32-LABEL: select_float:
 ; MIPS32:       # %bb.0: # %entry
@@ -115,6 +134,25 @@ entry:
   ret float %cond
 }
 
+define void @select_ambiguous_float_in_gpr(i1 %test, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {
+; MIPS32-LABEL: select_ambiguous_float_in_gpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lw $1, 0($5)
+; MIPS32-NEXT:    lw $2, 0($6)
+; MIPS32-NEXT:    ori $3, $zero, 1
+; MIPS32-NEXT:    and $3, $4, $3
+; MIPS32-NEXT:    movn $2, $1, $3
+; MIPS32-NEXT:    sw $2, 0($7)
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load float, float* %f32_ptr_a, align 4
+  %1 = load float, float* %f32_ptr_b, align 4
+  %cond = select i1 %test, float %0, float %1
+  store float %cond, float* %f32_ptr_c, align 4
+  ret void
+}
+
 define double @select_double(double %a, double %b, i1 %test) {
 ; MIPS32-LABEL: select_double:
 ; MIPS32:       # %bb.0: # %entry

Added: llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll?rev=365743&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll (added)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/llvm-ir/test_TypeInfoforMF.ll Thu Jul 11 02:22:49 2019
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O0 -mtriple=mipsel-linux-gnu -global-isel -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+
+define i32 @outgoing_gpr(i32* %i32_ptr) {
+; MIPS32-LABEL: outgoing_gpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lw $2, 0($4)
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load i32, i32* %i32_ptr
+  ret i32 %0
+}
+
+define float @outgoing_fpr(float* %float_ptr) {
+; MIPS32-LABEL: outgoing_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lwc1 $f0, 0($4)
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load float, float* %float_ptr
+  ret float %0
+}
+
+define i32 @outgoing_gpr_instr(i32* %i32_ptr1, i32* %i32_ptr2) {
+; MIPS32-LABEL: outgoing_gpr_instr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lw $1, 0($4)
+; MIPS32-NEXT:    lw $2, 0($5)
+; MIPS32-NEXT:    addu $2, $2, $1
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load i32, i32* %i32_ptr1
+  %1 = load i32, i32* %i32_ptr2
+  %outgoing_instr = add i32 %1, %0
+  ret i32 %outgoing_instr
+}
+
+define float @outgoing_fpr_instr(float* %float_ptr1, float* %float_ptr2) {
+; MIPS32-LABEL: outgoing_fpr_instr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lwc1 $f0, 0($4)
+; MIPS32-NEXT:    lwc1 $f1, 0($5)
+; MIPS32-NEXT:    add.s $f0, $f0, $f1
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load float, float* %float_ptr1
+  %1 = load float, float* %float_ptr2
+  %outgoing_instr = fadd float %0, %1
+  ret float %outgoing_instr
+}
+
+define i32 @incoming_gpr(i32 %incoming_phys_reg, i1 %test, i32* %a) {
+; MIPS32-LABEL: incoming_gpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lw $1, 0($6)
+; MIPS32-NEXT:    ori $2, $zero, 1
+; MIPS32-NEXT:    and $2, $5, $2
+; MIPS32-NEXT:    movn $4, $1, $2
+; MIPS32-NEXT:    move $2, $4
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load i32, i32* %a
+  %cond = select i1 %test, i32 %0, i32 %incoming_phys_reg
+  ret i32 %cond
+}
+
+define float @incoming_fpr(float %incoming_phys_reg, i1 %test, float* %a) {
+; MIPS32-LABEL: incoming_fpr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lwc1 $f0, 0($6)
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    and $1, $5, $1
+; MIPS32-NEXT:    movn.s $f12, $f0, $1
+; MIPS32-NEXT:    mov.s $f0, $f12
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load float, float* %a
+  %cond = select i1 %test, float %0, float %incoming_phys_reg
+  ret float %cond
+}
+
+
+define i32 @incoming_i32_instr(i32 %val1, i32 %val2, i32* %i32_ptr, i1 %test) {
+; MIPS32-LABEL: incoming_i32_instr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lw $1, 0($6)
+; MIPS32-NEXT:    addu $2, $5, $4
+; MIPS32-NEXT:    ori $3, $zero, 1
+; MIPS32-NEXT:    and $3, $7, $3
+; MIPS32-NEXT:    movn $2, $1, $3
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load i32, i32* %i32_ptr
+  %incoming_instr = add i32 %val2, %val1
+  %cond = select i1 %test, i32 %0, i32 %incoming_instr
+  ret i32 %cond
+}
+
+define float @incoming_float_instr(float %val1, float %val2, float* %float_ptr, i1 %test) {
+; MIPS32-LABEL: incoming_float_instr:
+; MIPS32:       # %bb.0: # %entry
+; MIPS32-NEXT:    lwc1 $f0, 0($6)
+; MIPS32-NEXT:    add.s $f1, $f14, $f12
+; MIPS32-NEXT:    ori $1, $zero, 1
+; MIPS32-NEXT:    and $1, $7, $1
+; MIPS32-NEXT:    movn.s $f1, $f0, $1
+; MIPS32-NEXT:    mov.s $f0, $f1
+; MIPS32-NEXT:    jr $ra
+; MIPS32-NEXT:    nop
+entry:
+  %0 = load float, float* %float_ptr
+  %incoming_instr = fadd float %val2, %val1
+  %cond = select i1 %test, float %0, float %incoming_instr
+  ret float %cond
+}
+

Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir?rev=365743&r1=365742&r2=365743&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/load.mir Thu Jul 11 02:22:49 2019
@@ -4,7 +4,9 @@
 
   define void @load_i32(i32* %ptr) {entry: ret void}
   define void @load_i64(i64* %ptr) {entry: ret void}
+  define void @load_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b) {entry: ret void}
   define void @load_float(float* %ptr) {entry: ret void}
+  define void @load_ambiguous_float_in_gpr(float* %float_ptr_a, float* %float_ptr_b) {entry: ret void}
   define void @load_double(double* %ptr) {entry: ret void}
 
 ...
@@ -57,6 +59,29 @@ body:             |
 
 ...
 ---
+name:            load_ambiguous_i64_in_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: load_ambiguous_i64_in_fpr
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY]](p0) :: (load 8 from %ir.i64_ptr_a)
+    ; MIPS32: G_STORE [[LOAD]](s64), [[COPY1]](p0) :: (store 8 into %ir.i64_ptr_b)
+    ; MIPS32: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(s64) = G_LOAD %0(p0) :: (load 8 from %ir.i64_ptr_a)
+    G_STORE %2(s64), %1(p0) :: (store 8 into %ir.i64_ptr_b)
+    RetRA
+
+...
+---
 name:            load_float
 alignment:       2
 legalized:       true
@@ -78,6 +103,29 @@ body:             |
 
 ...
 ---
+name:            load_ambiguous_float_in_gpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: load_ambiguous_float_in_gpr
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.float_ptr_a)
+    ; MIPS32: G_STORE [[LOAD]](s32), [[COPY1]](p0) :: (store 4 into %ir.float_ptr_b)
+    ; MIPS32: RetRA
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.float_ptr_a)
+    G_STORE %2(s32), %1(p0) :: (store 4 into %ir.float_ptr_b)
+    RetRA
+
+...
+---
 name:            load_double
 alignment:       2
 legalized:       true

Added: llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir?rev=365743&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir (added)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s32.mir Thu Jul 11 02:22:49 2019
@@ -0,0 +1,1047 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+--- |
+
+  define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+  entry:
+    br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+  pre.PHI.1:                                        ; preds = %entry
+    br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+  pre.PHI.1.0:                                      ; preds = %pre.PHI.1
+    br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+  b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
+    %phi1.0 = load i64, i64* %a
+    br label %b.PHI.1
+
+  b.PHI.1.1:                                        ; preds = %pre.PHI.1
+    %phi1.1 = load i64, i64* %b
+    br label %b.PHI.1
+
+  b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
+    %phi1.2 = load i64, i64* %c
+    br label %b.PHI.1
+
+  b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
+    %phi1 = phi i64 [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+    br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+  b.PHI.1.end:                                      ; preds = %b.PHI.1
+    store i64 %phi1, i64* %result
+    ret void
+
+  pre.PHI.2:                                        ; preds = %entry
+    br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+  b.PHI.2.0:                                        ; preds = %pre.PHI.2
+    %phi2.0 = load i64, i64* %a
+    br label %b.PHI.2
+
+  b.PHI.2.1:                                        ; preds = %pre.PHI.2
+    %phi2.1 = load i64, i64* %b
+    br label %b.PHI.2
+
+  b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
+    %phi2 = phi i64 [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+  b.PHI.2.end:                                      ; preds = %b.PHI.2
+    store i64 %phi2, i64* %result
+    ret void
+
+  b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
+    %phi3 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
+    %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
+    store i64 %sel_3_1.2, i64* %result
+    store i64 %phi3, i64* %result
+    ret void
+  }
+
+  define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+  entry:
+    br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+  pre.PHI.1:                                        ; preds = %entry
+    br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+  pre.PHI.1.0:                                      ; preds = %pre.PHI.1
+    br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+  b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
+    %phi1.0 = load i64, i64* %a
+    br label %b.PHI.1
+
+  b.PHI.1.1:                                        ; preds = %pre.PHI.1
+    %phi1.1 = load i64, i64* %b
+    br label %b.PHI.1
+
+  b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
+    %phi1.2 = load i64, i64* %c
+    br label %b.PHI.1
+
+  b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
+    %phi1 = phi i64 [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+    br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+  b.PHI.1.end:                                      ; preds = %b.PHI.1
+    store i64 %phi1, i64* %result
+    ret void
+
+  pre.PHI.2:                                        ; preds = %entry
+    br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+  b.PHI.2.0:                                        ; preds = %pre.PHI.2
+    %phi2.0 = load i64, i64* %a
+    br label %b.PHI.2
+
+  b.PHI.2.1:                                        ; preds = %pre.PHI.2
+    %phi2.1 = load i64, i64* %b
+    br label %b.PHI.2
+
+  b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
+    %phi2 = phi i64 [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+  b.PHI.2.end:                                      ; preds = %b.PHI.2
+    store i64 %phi2, i64* %result
+    ret void
+
+  b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
+    %phi3 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ 0, %b.PHI.1 ]
+    %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
+    %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
+    store i64 %sel_3_1.2, i64* %result
+    store i64 %phi3, i64* %result
+    ret void
+  }
+
+  define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+  entry:
+    br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+  pre.PHI.1:                                        ; preds = %entry
+    br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+  pre.PHI.1.0:                                      ; preds = %pre.PHI.1
+    br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+  b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
+    %phi1.0 = load double, double* %a
+    br label %b.PHI.1
+
+  b.PHI.1.1:                                        ; preds = %pre.PHI.1
+    %phi1.1 = load double, double* %b
+    br label %b.PHI.1
+
+  b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
+    %phi1.2 = load double, double* %c
+    br label %b.PHI.1
+
+  b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
+    %phi1 = phi double [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+    br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+  b.PHI.1.end:                                      ; preds = %b.PHI.1
+    store double %phi1, double* %result
+    ret void
+
+  pre.PHI.2:                                        ; preds = %entry
+    br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+  b.PHI.2.0:                                        ; preds = %pre.PHI.2
+    %phi2.0 = load double, double* %a
+    br label %b.PHI.2
+
+  b.PHI.2.1:                                        ; preds = %pre.PHI.2
+    %phi2.1 = load double, double* %b
+    br label %b.PHI.2
+
+  b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
+    %phi2 = phi double [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+  b.PHI.2.end:                                      ; preds = %b.PHI.2
+    store double %phi2, double* %result
+    ret void
+
+  b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
+    %phi3 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %phi4 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
+    %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
+    store double %sel_3_1.2, double* %result
+    store double %phi3, double* %result
+    ret void
+  }
+
+  define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+  entry:
+    br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+  pre.PHI.1:                                        ; preds = %entry
+    br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+  pre.PHI.1.0:                                      ; preds = %pre.PHI.1
+    br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+  b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
+    %phi1.0 = load double, double* %a
+    br label %b.PHI.1
+
+  b.PHI.1.1:                                        ; preds = %pre.PHI.1
+    %phi1.1 = load double, double* %b
+    br label %b.PHI.1
+
+  b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
+    %phi1.2 = load double, double* %c
+    br label %b.PHI.1
+
+  b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
+    %phi1 = phi double [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+    br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+  b.PHI.1.end:                                      ; preds = %b.PHI.1
+    store double %phi1, double* %result
+    ret void
+
+  pre.PHI.2:                                        ; preds = %entry
+    br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+  b.PHI.2.0:                                        ; preds = %pre.PHI.2
+    %phi2.0 = load double, double* %a
+    br label %b.PHI.2
+
+  b.PHI.2.1:                                        ; preds = %pre.PHI.2
+    %phi2.1 = load double, double* %b
+    br label %b.PHI.2
+
+  b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
+    %phi2 = phi double [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+  b.PHI.2.end:                                      ; preds = %b.PHI.2
+    store double %phi2, double* %result
+    ret void
+
+  b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
+    %phi3 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %phi4 = phi double [ %phi2, %b.PHI.2 ], [ 0.000000e+00, %b.PHI.1 ]
+    %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
+    %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
+    store double %sel_3_1.2, double* %result
+    store double %phi3, double* %result
+    ret void
+  }
+
+...
+---
+name:            long_chain_ambiguous_i64_in_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 24, size: 4, alignment: 8, isImmutable: true }
+  - { id: 1, offset: 20, size: 4, alignment: 4, isImmutable: true }
+  - { id: 2, offset: 16, size: 4, alignment: 8, isImmutable: true }
+body:             |
+  ; MIPS32-LABEL: name: long_chain_ambiguous_i64_in_fpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.8(0x40000000), %bb.1(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; MIPS32:   [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
+  ; MIPS32:   [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
+  ; MIPS32:   [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; MIPS32:   [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
+  ; MIPS32:   [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.8
+  ; MIPS32: bb.1.pre.PHI.1:
+  ; MIPS32:   successors: %bb.4(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+  ; MIPS32:   G_BRCOND [[AND1]](s32), %bb.4
+  ; MIPS32: bb.2.pre.PHI.1.0:
+  ; MIPS32:   successors: %bb.5(0x40000000), %bb.3(0x40000000)
+  ; MIPS32:   [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+  ; MIPS32:   G_BRCOND [[AND2]](s32), %bb.5
+  ; MIPS32: bb.3.b.PHI.1.0:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.4.b.PHI.1.1:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.5.b.PHI.1.2:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.c)
+  ; MIPS32: bb.6.b.PHI.1:
+  ; MIPS32:   successors: %bb.7(0x40000000), %bb.13(0x40000000)
+  ; MIPS32:   [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+  ; MIPS32:   [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+  ; MIPS32:   G_BRCOND [[AND3]](s32), %bb.7
+  ; MIPS32:   G_BR %bb.13
+  ; MIPS32: bb.7.b.PHI.1.end:
+  ; MIPS32:   G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.8.pre.PHI.2:
+  ; MIPS32:   successors: %bb.9(0x40000000), %bb.10(0x40000000)
+  ; MIPS32:   [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+  ; MIPS32:   G_BRCOND [[AND4]](s32), %bb.9
+  ; MIPS32:   G_BR %bb.10
+  ; MIPS32: bb.9.b.PHI.2.0:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.11
+  ; MIPS32: bb.10.b.PHI.2.1:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32: bb.11.b.PHI.2:
+  ; MIPS32:   successors: %bb.13(0x40000000), %bb.12(0x40000000)
+  ; MIPS32:   [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+  ; MIPS32:   [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+  ; MIPS32:   G_BRCOND [[AND5]](s32), %bb.13
+  ; MIPS32: bb.12.b.PHI.2.end:
+  ; MIPS32:   G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.13.b.PHI.3:
+  ; MIPS32:   [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+  ; MIPS32:   [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+  ; MIPS32:   [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+  ; MIPS32:   [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+  ; MIPS32:   G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %7:_(s32) = COPY $a0
+    %8:_(s32) = COPY $a1
+    %9:_(s32) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %10:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    %4:_(p0) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.2, align 8)
+    %11:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    %5:_(p0) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.1)
+    %12:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %6:_(p0) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.0, align 8)
+    %32:_(s32) = G_CONSTANT i32 1
+    %33:_(s32) = COPY %7(s32)
+    %31:_(s32) = G_AND %33, %32
+    G_BRCOND %31(s32), %bb.9
+
+  bb.2.pre.PHI.1:
+    %34:_(s32) = G_CONSTANT i32 1
+    %35:_(s32) = COPY %8(s32)
+    %30:_(s32) = G_AND %35, %34
+    G_BRCOND %30(s32), %bb.5
+
+  bb.3.pre.PHI.1.0:
+    %36:_(s32) = G_CONSTANT i32 1
+    %37:_(s32) = COPY %9(s32)
+    %29:_(s32) = G_AND %37, %36
+    G_BRCOND %29(s32), %bb.6
+
+  bb.4.b.PHI.1.0:
+    %13:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.7
+
+  bb.5.b.PHI.1.1:
+    %15:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+    G_BR %bb.7
+
+  bb.6.b.PHI.1.2:
+    %14:_(s64) = G_LOAD %5(p0) :: (load 8 from %ir.c)
+
+  bb.7.b.PHI.1:
+    %16:_(s64) = G_PHI %13(s64), %bb.4, %15(s64), %bb.5, %14(s64), %bb.6
+    %38:_(s32) = G_CONSTANT i32 1
+    %39:_(s32) = COPY %9(s32)
+    %28:_(s32) = G_AND %39, %38
+    G_BRCOND %28(s32), %bb.8
+    G_BR %bb.14
+
+  bb.8.b.PHI.1.end:
+    G_STORE %16(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.9.pre.PHI.2:
+    %40:_(s32) = G_CONSTANT i32 1
+    %41:_(s32) = COPY %7(s32)
+    %27:_(s32) = G_AND %41, %40
+    G_BRCOND %27(s32), %bb.10
+    G_BR %bb.11
+
+  bb.10.b.PHI.2.0:
+    %18:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.12
+
+  bb.11.b.PHI.2.1:
+    %17:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+
+  bb.12.b.PHI.2:
+    %19:_(s64) = G_PHI %18(s64), %bb.10, %17(s64), %bb.11
+    %42:_(s32) = G_CONSTANT i32 1
+    %43:_(s32) = COPY %8(s32)
+    %26:_(s32) = G_AND %43, %42
+    G_BRCOND %26(s32), %bb.14
+
+  bb.13.b.PHI.2.end:
+    G_STORE %19(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.14.b.PHI.3:
+    %20:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %21:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %44:_(s32) = G_CONSTANT i32 1
+    %45:_(s32) = COPY %9(s32)
+    %25:_(s32) = G_AND %45, %44
+    %22:_(s64) = G_SELECT %25(s32), %20, %21
+    %46:_(s32) = COPY %8(s32)
+    %24:_(s32) = G_AND %46, %44
+    %23:_(s64) = G_SELECT %24(s32), %22, %20
+    G_STORE %23(s64), %6(p0) :: (store 8 into %ir.result)
+    G_STORE %20(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+...
+---
+name:            long_chain_i64_in_gpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 24, size: 4, alignment: 8, isImmutable: true }
+  - { id: 1, offset: 20, size: 4, alignment: 4, isImmutable: true }
+  - { id: 2, offset: 16, size: 4, alignment: 8, isImmutable: true }
+body:             |
+  ; MIPS32-LABEL: name: long_chain_i64_in_gpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.8(0x40000000), %bb.1(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; MIPS32:   [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
+  ; MIPS32:   [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
+  ; MIPS32:   [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; MIPS32:   [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
+  ; MIPS32:   [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+  ; MIPS32:   [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.8
+  ; MIPS32: bb.1.pre.PHI.1:
+  ; MIPS32:   successors: %bb.4(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+  ; MIPS32:   G_BRCOND [[AND1]](s32), %bb.4
+  ; MIPS32: bb.2.pre.PHI.1.0:
+  ; MIPS32:   successors: %bb.5(0x40000000), %bb.3(0x40000000)
+  ; MIPS32:   [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+  ; MIPS32:   G_BRCOND [[AND2]](s32), %bb.5
+  ; MIPS32: bb.3.b.PHI.1.0:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load 4 from %ir.a, align 8)
+  ; MIPS32:   [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP:%[0-9]+]]:gprb(p0) = G_GEP [[COPY3]], [[C4]](s32)
+  ; MIPS32:   [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP]](p0) :: (load 4 from %ir.a + 4, align 8)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.4.b.PHI.1.1:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.b, align 8)
+  ; MIPS32:   [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP1:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD]], [[C5]](s32)
+  ; MIPS32:   [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.b + 4, align 8)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.5.b.PHI.1.2:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.c, align 8)
+  ; MIPS32:   [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP2:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD1]], [[C6]](s32)
+  ; MIPS32:   [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.c + 4, align 8)
+  ; MIPS32: bb.6.b.PHI.1:
+  ; MIPS32:   successors: %bb.7(0x40000000), %bb.13(0x40000000)
+  ; MIPS32:   [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
+  ; MIPS32:   [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
+  ; MIPS32:   [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
+  ; MIPS32:   G_BRCOND [[AND3]](s32), %bb.7
+  ; MIPS32:   G_BR %bb.13
+  ; MIPS32: bb.7.b.PHI.1.end:
+  ; MIPS32:   G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
+  ; MIPS32:   [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP3:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD2]], [[C8]](s32)
+  ; MIPS32:   G_STORE [[PHI1]](s32), [[GEP3]](p0) :: (store 4 into %ir.result + 4, align 8)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.8.pre.PHI.2:
+  ; MIPS32:   successors: %bb.9(0x40000000), %bb.10(0x40000000)
+  ; MIPS32:   [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
+  ; MIPS32:   G_BRCOND [[AND4]](s32), %bb.9
+  ; MIPS32:   G_BR %bb.10
+  ; MIPS32: bb.9.b.PHI.2.0:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load 4 from %ir.a, align 8)
+  ; MIPS32:   [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP4:%[0-9]+]]:gprb(p0) = G_GEP [[COPY3]], [[C10]](s32)
+  ; MIPS32:   [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.a + 4, align 8)
+  ; MIPS32:   G_BR %bb.11
+  ; MIPS32: bb.10.b.PHI.2.1:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.b, align 8)
+  ; MIPS32:   [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP5:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD]], [[C11]](s32)
+  ; MIPS32:   [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP5]](p0) :: (load 4 from %ir.b + 4, align 8)
+  ; MIPS32: bb.11.b.PHI.2:
+  ; MIPS32:   successors: %bb.13(0x40000000), %bb.12(0x40000000)
+  ; MIPS32:   [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
+  ; MIPS32:   [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
+  ; MIPS32:   [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
+  ; MIPS32:   G_BRCOND [[AND5]](s32), %bb.13
+  ; MIPS32: bb.12.b.PHI.2.end:
+  ; MIPS32:   G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
+  ; MIPS32:   [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP6:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD2]], [[C13]](s32)
+  ; MIPS32:   G_STORE [[PHI3]](s32), [[GEP6]](p0) :: (store 4 into %ir.result + 4, align 8)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.13.b.PHI.3:
+  ; MIPS32:   [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
+  ; MIPS32:   [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
+  ; MIPS32:   [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
+  ; MIPS32:   [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
+  ; MIPS32:   [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
+  ; MIPS32:   [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
+  ; MIPS32:   [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
+  ; MIPS32:   [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
+  ; MIPS32:   [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
+  ; MIPS32:   [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
+  ; MIPS32:   G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
+  ; MIPS32:   [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP7:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD2]], [[C15]](s32)
+  ; MIPS32:   G_STORE [[SELECT3]](s32), [[GEP7]](p0) :: (store 4 into %ir.result + 4, align 8)
+  ; MIPS32:   G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
+  ; MIPS32:   [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP8:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD2]], [[C16]](s32)
+  ; MIPS32:   G_STORE [[PHI5]](s32), [[GEP8]](p0) :: (store 4 into %ir.result + 4, align 8)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %7:_(s32) = COPY $a0
+    %8:_(s32) = COPY $a1
+    %9:_(s32) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %10:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    %4:_(p0) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.2, align 8)
+    %11:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    %5:_(p0) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.1)
+    %12:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %6:_(p0) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.0, align 8)
+    %33:_(s32) = G_CONSTANT i32 0
+    %24:_(s64) = G_MERGE_VALUES %33(s32), %33(s32)
+    %34:_(s32) = G_CONSTANT i32 1
+    %35:_(s32) = COPY %7(s32)
+    %32:_(s32) = G_AND %35, %34
+    G_BRCOND %32(s32), %bb.9
+
+  bb.2.pre.PHI.1:
+    %36:_(s32) = G_CONSTANT i32 1
+    %37:_(s32) = COPY %8(s32)
+    %31:_(s32) = G_AND %37, %36
+    G_BRCOND %31(s32), %bb.5
+
+  bb.3.pre.PHI.1.0:
+    %38:_(s32) = G_CONSTANT i32 1
+    %39:_(s32) = COPY %9(s32)
+    %30:_(s32) = G_AND %39, %38
+    G_BRCOND %30(s32), %bb.6
+
+  bb.4.b.PHI.1.0:
+    %13:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.7
+
+  bb.5.b.PHI.1.1:
+    %15:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+    G_BR %bb.7
+
+  bb.6.b.PHI.1.2:
+    %14:_(s64) = G_LOAD %5(p0) :: (load 8 from %ir.c)
+
+  bb.7.b.PHI.1:
+    %16:_(s64) = G_PHI %13(s64), %bb.4, %15(s64), %bb.5, %14(s64), %bb.6
+    %40:_(s32) = G_CONSTANT i32 1
+    %41:_(s32) = COPY %9(s32)
+    %29:_(s32) = G_AND %41, %40
+    G_BRCOND %29(s32), %bb.8
+    G_BR %bb.14
+
+  bb.8.b.PHI.1.end:
+    G_STORE %16(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.9.pre.PHI.2:
+    %42:_(s32) = G_CONSTANT i32 1
+    %43:_(s32) = COPY %7(s32)
+    %28:_(s32) = G_AND %43, %42
+    G_BRCOND %28(s32), %bb.10
+    G_BR %bb.11
+
+  bb.10.b.PHI.2.0:
+    %18:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.12
+
+  bb.11.b.PHI.2.1:
+    %17:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+
+  bb.12.b.PHI.2:
+    %19:_(s64) = G_PHI %18(s64), %bb.10, %17(s64), %bb.11
+    %44:_(s32) = G_CONSTANT i32 1
+    %45:_(s32) = COPY %8(s32)
+    %27:_(s32) = G_AND %45, %44
+    G_BRCOND %27(s32), %bb.14
+
+  bb.13.b.PHI.2.end:
+    G_STORE %19(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.14.b.PHI.3:
+    %20:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %21:_(s64) = G_PHI %19(s64), %bb.12, %24(s64), %bb.7
+    %46:_(s32) = G_CONSTANT i32 1
+    %47:_(s32) = COPY %9(s32)
+    %26:_(s32) = G_AND %47, %46
+    %22:_(s64) = G_SELECT %26(s32), %20, %21
+    %48:_(s32) = COPY %8(s32)
+    %25:_(s32) = G_AND %48, %46
+    %23:_(s64) = G_SELECT %25(s32), %22, %20
+    G_STORE %23(s64), %6(p0) :: (store 8 into %ir.result)
+    G_STORE %20(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+...
+---
+name:            long_chain_ambiguous_double_in_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 24, size: 4, alignment: 8, isImmutable: true }
+  - { id: 1, offset: 20, size: 4, alignment: 4, isImmutable: true }
+  - { id: 2, offset: 16, size: 4, alignment: 8, isImmutable: true }
+body:             |
+  ; MIPS32-LABEL: name: long_chain_ambiguous_double_in_fpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.8(0x40000000), %bb.1(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; MIPS32:   [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
+  ; MIPS32:   [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
+  ; MIPS32:   [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; MIPS32:   [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
+  ; MIPS32:   [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.8
+  ; MIPS32: bb.1.pre.PHI.1:
+  ; MIPS32:   successors: %bb.4(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+  ; MIPS32:   G_BRCOND [[AND1]](s32), %bb.4
+  ; MIPS32: bb.2.pre.PHI.1.0:
+  ; MIPS32:   successors: %bb.5(0x40000000), %bb.3(0x40000000)
+  ; MIPS32:   [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+  ; MIPS32:   G_BRCOND [[AND2]](s32), %bb.5
+  ; MIPS32: bb.3.b.PHI.1.0:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.4.b.PHI.1.1:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.5.b.PHI.1.2:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.c)
+  ; MIPS32: bb.6.b.PHI.1:
+  ; MIPS32:   successors: %bb.7(0x40000000), %bb.13(0x40000000)
+  ; MIPS32:   [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+  ; MIPS32:   [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+  ; MIPS32:   G_BRCOND [[AND3]](s32), %bb.7
+  ; MIPS32:   G_BR %bb.13
+  ; MIPS32: bb.7.b.PHI.1.end:
+  ; MIPS32:   G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.8.pre.PHI.2:
+  ; MIPS32:   successors: %bb.9(0x40000000), %bb.10(0x40000000)
+  ; MIPS32:   [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+  ; MIPS32:   G_BRCOND [[AND4]](s32), %bb.9
+  ; MIPS32:   G_BR %bb.10
+  ; MIPS32: bb.9.b.PHI.2.0:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.11
+  ; MIPS32: bb.10.b.PHI.2.1:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32: bb.11.b.PHI.2:
+  ; MIPS32:   successors: %bb.13(0x40000000), %bb.12(0x40000000)
+  ; MIPS32:   [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+  ; MIPS32:   [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+  ; MIPS32:   G_BRCOND [[AND5]](s32), %bb.13
+  ; MIPS32: bb.12.b.PHI.2.end:
+  ; MIPS32:   G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.13.b.PHI.3:
+  ; MIPS32:   [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+  ; MIPS32:   [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+  ; MIPS32:   [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+  ; MIPS32:   [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+  ; MIPS32:   G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %7:_(s32) = COPY $a0
+    %8:_(s32) = COPY $a1
+    %9:_(s32) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %10:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    %4:_(p0) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.2, align 8)
+    %11:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    %5:_(p0) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.1)
+    %12:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %6:_(p0) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.0, align 8)
+    %32:_(s32) = G_CONSTANT i32 1
+    %33:_(s32) = COPY %7(s32)
+    %31:_(s32) = G_AND %33, %32
+    G_BRCOND %31(s32), %bb.9
+
+  bb.2.pre.PHI.1:
+    %34:_(s32) = G_CONSTANT i32 1
+    %35:_(s32) = COPY %8(s32)
+    %30:_(s32) = G_AND %35, %34
+    G_BRCOND %30(s32), %bb.5
+
+  bb.3.pre.PHI.1.0:
+    %36:_(s32) = G_CONSTANT i32 1
+    %37:_(s32) = COPY %9(s32)
+    %29:_(s32) = G_AND %37, %36
+    G_BRCOND %29(s32), %bb.6
+
+  bb.4.b.PHI.1.0:
+    %13:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.7
+
+  bb.5.b.PHI.1.1:
+    %15:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+    G_BR %bb.7
+
+  bb.6.b.PHI.1.2:
+    %14:_(s64) = G_LOAD %5(p0) :: (load 8 from %ir.c)
+
+  bb.7.b.PHI.1:
+    %16:_(s64) = G_PHI %13(s64), %bb.4, %15(s64), %bb.5, %14(s64), %bb.6
+    %38:_(s32) = G_CONSTANT i32 1
+    %39:_(s32) = COPY %9(s32)
+    %28:_(s32) = G_AND %39, %38
+    G_BRCOND %28(s32), %bb.8
+    G_BR %bb.14
+
+  bb.8.b.PHI.1.end:
+    G_STORE %16(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.9.pre.PHI.2:
+    %40:_(s32) = G_CONSTANT i32 1
+    %41:_(s32) = COPY %7(s32)
+    %27:_(s32) = G_AND %41, %40
+    G_BRCOND %27(s32), %bb.10
+    G_BR %bb.11
+
+  bb.10.b.PHI.2.0:
+    %18:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.12
+
+  bb.11.b.PHI.2.1:
+    %17:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+
+  bb.12.b.PHI.2:
+    %19:_(s64) = G_PHI %18(s64), %bb.10, %17(s64), %bb.11
+    %42:_(s32) = G_CONSTANT i32 1
+    %43:_(s32) = COPY %8(s32)
+    %26:_(s32) = G_AND %43, %42
+    G_BRCOND %26(s32), %bb.14
+
+  bb.13.b.PHI.2.end:
+    G_STORE %19(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.14.b.PHI.3:
+    %20:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %21:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %44:_(s32) = G_CONSTANT i32 1
+    %45:_(s32) = COPY %9(s32)
+    %25:_(s32) = G_AND %45, %44
+    %22:_(s64) = G_SELECT %25(s32), %20, %21
+    %46:_(s32) = COPY %8(s32)
+    %24:_(s32) = G_AND %46, %44
+    %23:_(s64) = G_SELECT %24(s32), %22, %20
+    G_STORE %23(s64), %6(p0) :: (store 8 into %ir.result)
+    G_STORE %20(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+...
+---
+name:            long_chain_double_in_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 24, size: 4, alignment: 8, isImmutable: true }
+  - { id: 1, offset: 20, size: 4, alignment: 4, isImmutable: true }
+  - { id: 2, offset: 16, size: 4, alignment: 8, isImmutable: true }
+body:             |
+  ; MIPS32-LABEL: name: long_chain_double_in_fpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.8(0x40000000), %bb.1(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; MIPS32:   [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
+  ; MIPS32:   [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
+  ; MIPS32:   [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; MIPS32:   [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
+  ; MIPS32:   [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+  ; MIPS32:   [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.8
+  ; MIPS32: bb.1.pre.PHI.1:
+  ; MIPS32:   successors: %bb.4(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+  ; MIPS32:   G_BRCOND [[AND1]](s32), %bb.4
+  ; MIPS32: bb.2.pre.PHI.1.0:
+  ; MIPS32:   successors: %bb.5(0x40000000), %bb.3(0x40000000)
+  ; MIPS32:   [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+  ; MIPS32:   G_BRCOND [[AND2]](s32), %bb.5
+  ; MIPS32: bb.3.b.PHI.1.0:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.4.b.PHI.1.1:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.5.b.PHI.1.2:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.c)
+  ; MIPS32: bb.6.b.PHI.1:
+  ; MIPS32:   successors: %bb.7(0x40000000), %bb.13(0x40000000)
+  ; MIPS32:   [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+  ; MIPS32:   [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
+  ; MIPS32:   G_BRCOND [[AND3]](s32), %bb.7
+  ; MIPS32:   G_BR %bb.13
+  ; MIPS32: bb.7.b.PHI.1.end:
+  ; MIPS32:   G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.8.pre.PHI.2:
+  ; MIPS32:   successors: %bb.9(0x40000000), %bb.10(0x40000000)
+  ; MIPS32:   [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
+  ; MIPS32:   G_BRCOND [[AND4]](s32), %bb.9
+  ; MIPS32:   G_BR %bb.10
+  ; MIPS32: bb.9.b.PHI.2.0:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.11
+  ; MIPS32: bb.10.b.PHI.2.1:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32: bb.11.b.PHI.2:
+  ; MIPS32:   successors: %bb.13(0x40000000), %bb.12(0x40000000)
+  ; MIPS32:   [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+  ; MIPS32:   [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
+  ; MIPS32:   G_BRCOND [[AND5]](s32), %bb.13
+  ; MIPS32: bb.12.b.PHI.2.end:
+  ; MIPS32:   G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.13.b.PHI.3:
+  ; MIPS32:   [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
+  ; MIPS32:   [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
+  ; MIPS32:   [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+  ; MIPS32:   [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
+  ; MIPS32:   [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+  ; MIPS32:   G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %7:_(s32) = COPY $a0
+    %8:_(s32) = COPY $a1
+    %9:_(s32) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %10:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    %4:_(p0) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.2, align 8)
+    %11:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    %5:_(p0) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.1)
+    %12:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %6:_(p0) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.0, align 8)
+    %24:_(s64) = G_FCONSTANT double 0.000000e+00
+    %33:_(s32) = G_CONSTANT i32 1
+    %34:_(s32) = COPY %7(s32)
+    %32:_(s32) = G_AND %34, %33
+    G_BRCOND %32(s32), %bb.9
+
+  bb.2.pre.PHI.1:
+    %35:_(s32) = G_CONSTANT i32 1
+    %36:_(s32) = COPY %8(s32)
+    %31:_(s32) = G_AND %36, %35
+    G_BRCOND %31(s32), %bb.5
+
+  bb.3.pre.PHI.1.0:
+    %37:_(s32) = G_CONSTANT i32 1
+    %38:_(s32) = COPY %9(s32)
+    %30:_(s32) = G_AND %38, %37
+    G_BRCOND %30(s32), %bb.6
+
+  bb.4.b.PHI.1.0:
+    %13:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.7
+
+  bb.5.b.PHI.1.1:
+    %15:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+    G_BR %bb.7
+
+  bb.6.b.PHI.1.2:
+    %14:_(s64) = G_LOAD %5(p0) :: (load 8 from %ir.c)
+
+  bb.7.b.PHI.1:
+    %16:_(s64) = G_PHI %13(s64), %bb.4, %15(s64), %bb.5, %14(s64), %bb.6
+    %39:_(s32) = G_CONSTANT i32 1
+    %40:_(s32) = COPY %9(s32)
+    %29:_(s32) = G_AND %40, %39
+    G_BRCOND %29(s32), %bb.8
+    G_BR %bb.14
+
+  bb.8.b.PHI.1.end:
+    G_STORE %16(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.9.pre.PHI.2:
+    %41:_(s32) = G_CONSTANT i32 1
+    %42:_(s32) = COPY %7(s32)
+    %28:_(s32) = G_AND %42, %41
+    G_BRCOND %28(s32), %bb.10
+    G_BR %bb.11
+
+  bb.10.b.PHI.2.0:
+    %18:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.12
+
+  bb.11.b.PHI.2.1:
+    %17:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+
+  bb.12.b.PHI.2:
+    %19:_(s64) = G_PHI %18(s64), %bb.10, %17(s64), %bb.11
+    %43:_(s32) = G_CONSTANT i32 1
+    %44:_(s32) = COPY %8(s32)
+    %27:_(s32) = G_AND %44, %43
+    G_BRCOND %27(s32), %bb.14
+
+  bb.13.b.PHI.2.end:
+    G_STORE %19(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.14.b.PHI.3:
+    %20:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %21:_(s64) = G_PHI %19(s64), %bb.12, %24(s64), %bb.7
+    %45:_(s32) = G_CONSTANT i32 1
+    %46:_(s32) = COPY %9(s32)
+    %26:_(s32) = G_AND %46, %45
+    %22:_(s64) = G_SELECT %26(s32), %20, %21
+    %47:_(s32) = COPY %8(s32)
+    %25:_(s32) = G_AND %47, %45
+    %23:_(s64) = G_SELECT %25(s32), %22, %20
+    G_STORE %23(s64), %6(p0) :: (store 8 into %ir.result)
+    G_STORE %20(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+...

Added: llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir?rev=365743&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir (added)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/long_ambiguous_chain_s64.mir Thu Jul 11 02:22:49 2019
@@ -0,0 +1,1047 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+--- |
+
+  define void @long_chain_ambiguous_i64_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+  entry:
+    br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+  pre.PHI.1:                                        ; preds = %entry
+    br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+  pre.PHI.1.0:                                      ; preds = %pre.PHI.1
+    br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+  b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
+    %phi1.0 = load i64, i64* %a
+    br label %b.PHI.1
+
+  b.PHI.1.1:                                        ; preds = %pre.PHI.1
+    %phi1.1 = load i64, i64* %b
+    br label %b.PHI.1
+
+  b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
+    %phi1.2 = load i64, i64* %c
+    br label %b.PHI.1
+
+  b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
+    %phi1 = phi i64 [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+    br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+  b.PHI.1.end:                                      ; preds = %b.PHI.1
+    store i64 %phi1, i64* %result
+    ret void
+
+  pre.PHI.2:                                        ; preds = %entry
+    br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+  b.PHI.2.0:                                        ; preds = %pre.PHI.2
+    %phi2.0 = load i64, i64* %a
+    br label %b.PHI.2
+
+  b.PHI.2.1:                                        ; preds = %pre.PHI.2
+    %phi2.1 = load i64, i64* %b
+    br label %b.PHI.2
+
+  b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
+    %phi2 = phi i64 [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+  b.PHI.2.end:                                      ; preds = %b.PHI.2
+    store i64 %phi2, i64* %result
+    ret void
+
+  b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
+    %phi3 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
+    %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
+    store i64 %sel_3_1.2, i64* %result
+    store i64 %phi3, i64* %result
+    ret void
+  }
+
+  define void @long_chain_i64_in_gpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, i64* %a, i64* %b, i64* %c, i64* %result) {
+  entry:
+    br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+  pre.PHI.1:                                        ; preds = %entry
+    br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+  pre.PHI.1.0:                                      ; preds = %pre.PHI.1
+    br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+  b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
+    %phi1.0 = load i64, i64* %a
+    br label %b.PHI.1
+
+  b.PHI.1.1:                                        ; preds = %pre.PHI.1
+    %phi1.1 = load i64, i64* %b
+    br label %b.PHI.1
+
+  b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
+    %phi1.2 = load i64, i64* %c
+    br label %b.PHI.1
+
+  b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
+    %phi1 = phi i64 [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+    br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+  b.PHI.1.end:                                      ; preds = %b.PHI.1
+    store i64 %phi1, i64* %result
+    ret void
+
+  pre.PHI.2:                                        ; preds = %entry
+    br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+  b.PHI.2.0:                                        ; preds = %pre.PHI.2
+    %phi2.0 = load i64, i64* %a
+    br label %b.PHI.2
+
+  b.PHI.2.1:                                        ; preds = %pre.PHI.2
+    %phi2.1 = load i64, i64* %b
+    br label %b.PHI.2
+
+  b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
+    %phi2 = phi i64 [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+  b.PHI.2.end:                                      ; preds = %b.PHI.2
+    store i64 %phi2, i64* %result
+    ret void
+
+  b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
+    %phi3 = phi i64 [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %phi4 = phi i64 [ %phi2, %b.PHI.2 ], [ 0, %b.PHI.1 ]
+    %sel_1.2 = select i1 %cnd2, i64 %phi3, i64 %phi4
+    %sel_3_1.2 = select i1 %cnd1, i64 %sel_1.2, i64 %phi3
+    store i64 %sel_3_1.2, i64* %result
+    store i64 %phi3, i64* %result
+    ret void
+  }
+
+  define void @long_chain_ambiguous_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+  entry:
+    br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+  pre.PHI.1:                                        ; preds = %entry
+    br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+  pre.PHI.1.0:                                      ; preds = %pre.PHI.1
+    br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+  b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
+    %phi1.0 = load double, double* %a
+    br label %b.PHI.1
+
+  b.PHI.1.1:                                        ; preds = %pre.PHI.1
+    %phi1.1 = load double, double* %b
+    br label %b.PHI.1
+
+  b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
+    %phi1.2 = load double, double* %c
+    br label %b.PHI.1
+
+  b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
+    %phi1 = phi double [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+    br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+  b.PHI.1.end:                                      ; preds = %b.PHI.1
+    store double %phi1, double* %result
+    ret void
+
+  pre.PHI.2:                                        ; preds = %entry
+    br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+  b.PHI.2.0:                                        ; preds = %pre.PHI.2
+    %phi2.0 = load double, double* %a
+    br label %b.PHI.2
+
+  b.PHI.2.1:                                        ; preds = %pre.PHI.2
+    %phi2.1 = load double, double* %b
+    br label %b.PHI.2
+
+  b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
+    %phi2 = phi double [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+  b.PHI.2.end:                                      ; preds = %b.PHI.2
+    store double %phi2, double* %result
+    ret void
+
+  b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
+    %phi3 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %phi4 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
+    %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
+    store double %sel_3_1.2, double* %result
+    store double %phi3, double* %result
+    ret void
+  }
+
+  define void @long_chain_double_in_fpr(i1 %cnd0, i1 %cnd1, i1 %cnd2, double* %a, double* %b, double* %c, double* %result) {
+  entry:
+    br i1 %cnd0, label %pre.PHI.2, label %pre.PHI.1
+
+  pre.PHI.1:                                        ; preds = %entry
+    br i1 %cnd1, label %b.PHI.1.1, label %pre.PHI.1.0
+
+  pre.PHI.1.0:                                      ; preds = %pre.PHI.1
+    br i1 %cnd2, label %b.PHI.1.2, label %b.PHI.1.0
+
+  b.PHI.1.0:                                        ; preds = %pre.PHI.1.0
+    %phi1.0 = load double, double* %a
+    br label %b.PHI.1
+
+  b.PHI.1.1:                                        ; preds = %pre.PHI.1
+    %phi1.1 = load double, double* %b
+    br label %b.PHI.1
+
+  b.PHI.1.2:                                        ; preds = %pre.PHI.1.0
+    %phi1.2 = load double, double* %c
+    br label %b.PHI.1
+
+  b.PHI.1:                                          ; preds = %b.PHI.1.2, %b.PHI.1.1, %b.PHI.1.0
+    %phi1 = phi double [ %phi1.0, %b.PHI.1.0 ], [ %phi1.1, %b.PHI.1.1 ], [ %phi1.2, %b.PHI.1.2 ]
+    br i1 %cnd2, label %b.PHI.1.end, label %b.PHI.3
+
+  b.PHI.1.end:                                      ; preds = %b.PHI.1
+    store double %phi1, double* %result
+    ret void
+
+  pre.PHI.2:                                        ; preds = %entry
+    br i1 %cnd0, label %b.PHI.2.0, label %b.PHI.2.1
+
+  b.PHI.2.0:                                        ; preds = %pre.PHI.2
+    %phi2.0 = load double, double* %a
+    br label %b.PHI.2
+
+  b.PHI.2.1:                                        ; preds = %pre.PHI.2
+    %phi2.1 = load double, double* %b
+    br label %b.PHI.2
+
+  b.PHI.2:                                          ; preds = %b.PHI.2.1, %b.PHI.2.0
+    %phi2 = phi double [ %phi2.0, %b.PHI.2.0 ], [ %phi2.1, %b.PHI.2.1 ]
+    br i1 %cnd1, label %b.PHI.3, label %b.PHI.2.end
+
+  b.PHI.2.end:                                      ; preds = %b.PHI.2
+    store double %phi2, double* %result
+    ret void
+
+  b.PHI.3:                                          ; preds = %b.PHI.2, %b.PHI.1
+    %phi3 = phi double [ %phi2, %b.PHI.2 ], [ %phi1, %b.PHI.1 ]
+    %phi4 = phi double [ %phi2, %b.PHI.2 ], [ 0.000000e+00, %b.PHI.1 ]
+    %sel_1.2 = select i1 %cnd2, double %phi3, double %phi4
+    %sel_3_1.2 = select i1 %cnd1, double %sel_1.2, double %phi3
+    store double %sel_3_1.2, double* %result
+    store double %phi3, double* %result
+    ret void
+  }
+
+...
+---
+name:            long_chain_ambiguous_i64_in_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 24, size: 4, alignment: 8, isImmutable: true }
+  - { id: 1, offset: 20, size: 4, alignment: 4, isImmutable: true }
+  - { id: 2, offset: 16, size: 4, alignment: 8, isImmutable: true }
+body:             |
+  ; MIPS32-LABEL: name: long_chain_ambiguous_i64_in_fpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.8(0x40000000), %bb.1(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; MIPS32:   [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
+  ; MIPS32:   [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
+  ; MIPS32:   [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; MIPS32:   [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
+  ; MIPS32:   [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.8
+  ; MIPS32: bb.1.pre.PHI.1:
+  ; MIPS32:   successors: %bb.4(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+  ; MIPS32:   G_BRCOND [[AND1]](s32), %bb.4
+  ; MIPS32: bb.2.pre.PHI.1.0:
+  ; MIPS32:   successors: %bb.5(0x40000000), %bb.3(0x40000000)
+  ; MIPS32:   [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+  ; MIPS32:   G_BRCOND [[AND2]](s32), %bb.5
+  ; MIPS32: bb.3.b.PHI.1.0:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.4.b.PHI.1.1:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.5.b.PHI.1.2:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.c)
+  ; MIPS32: bb.6.b.PHI.1:
+  ; MIPS32:   successors: %bb.7(0x40000000), %bb.13(0x40000000)
+  ; MIPS32:   [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+  ; MIPS32:   [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+  ; MIPS32:   G_BRCOND [[AND3]](s32), %bb.7
+  ; MIPS32:   G_BR %bb.13
+  ; MIPS32: bb.7.b.PHI.1.end:
+  ; MIPS32:   G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.8.pre.PHI.2:
+  ; MIPS32:   successors: %bb.9(0x40000000), %bb.10(0x40000000)
+  ; MIPS32:   [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+  ; MIPS32:   G_BRCOND [[AND4]](s32), %bb.9
+  ; MIPS32:   G_BR %bb.10
+  ; MIPS32: bb.9.b.PHI.2.0:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.11
+  ; MIPS32: bb.10.b.PHI.2.1:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32: bb.11.b.PHI.2:
+  ; MIPS32:   successors: %bb.13(0x40000000), %bb.12(0x40000000)
+  ; MIPS32:   [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+  ; MIPS32:   [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+  ; MIPS32:   G_BRCOND [[AND5]](s32), %bb.13
+  ; MIPS32: bb.12.b.PHI.2.end:
+  ; MIPS32:   G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.13.b.PHI.3:
+  ; MIPS32:   [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+  ; MIPS32:   [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+  ; MIPS32:   [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+  ; MIPS32:   [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+  ; MIPS32:   G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %7:_(s32) = COPY $a0
+    %8:_(s32) = COPY $a1
+    %9:_(s32) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %10:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    %4:_(p0) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.2, align 8)
+    %11:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    %5:_(p0) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.1)
+    %12:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %6:_(p0) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.0, align 8)
+    %32:_(s32) = G_CONSTANT i32 1
+    %33:_(s32) = COPY %7(s32)
+    %31:_(s32) = G_AND %33, %32
+    G_BRCOND %31(s32), %bb.9
+
+  bb.2.pre.PHI.1:
+    %34:_(s32) = G_CONSTANT i32 1
+    %35:_(s32) = COPY %8(s32)
+    %30:_(s32) = G_AND %35, %34
+    G_BRCOND %30(s32), %bb.5
+
+  bb.3.pre.PHI.1.0:
+    %36:_(s32) = G_CONSTANT i32 1
+    %37:_(s32) = COPY %9(s32)
+    %29:_(s32) = G_AND %37, %36
+    G_BRCOND %29(s32), %bb.6
+
+  bb.4.b.PHI.1.0:
+    %13:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.7
+
+  bb.5.b.PHI.1.1:
+    %15:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+    G_BR %bb.7
+
+  bb.6.b.PHI.1.2:
+    %14:_(s64) = G_LOAD %5(p0) :: (load 8 from %ir.c)
+
+  bb.7.b.PHI.1:
+    %16:_(s64) = G_PHI %13(s64), %bb.4, %15(s64), %bb.5, %14(s64), %bb.6
+    %38:_(s32) = G_CONSTANT i32 1
+    %39:_(s32) = COPY %9(s32)
+    %28:_(s32) = G_AND %39, %38
+    G_BRCOND %28(s32), %bb.8
+    G_BR %bb.14
+
+  bb.8.b.PHI.1.end:
+    G_STORE %16(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.9.pre.PHI.2:
+    %40:_(s32) = G_CONSTANT i32 1
+    %41:_(s32) = COPY %7(s32)
+    %27:_(s32) = G_AND %41, %40
+    G_BRCOND %27(s32), %bb.10
+    G_BR %bb.11
+
+  bb.10.b.PHI.2.0:
+    %18:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.12
+
+  bb.11.b.PHI.2.1:
+    %17:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+
+  bb.12.b.PHI.2:
+    %19:_(s64) = G_PHI %18(s64), %bb.10, %17(s64), %bb.11
+    %42:_(s32) = G_CONSTANT i32 1
+    %43:_(s32) = COPY %8(s32)
+    %26:_(s32) = G_AND %43, %42
+    G_BRCOND %26(s32), %bb.14
+
+  bb.13.b.PHI.2.end:
+    G_STORE %19(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.14.b.PHI.3:
+    %20:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %21:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %44:_(s32) = G_CONSTANT i32 1
+    %45:_(s32) = COPY %9(s32)
+    %25:_(s32) = G_AND %45, %44
+    %22:_(s64) = G_SELECT %25(s32), %20, %21
+    %46:_(s32) = COPY %8(s32)
+    %24:_(s32) = G_AND %46, %44
+    %23:_(s64) = G_SELECT %24(s32), %22, %20
+    G_STORE %23(s64), %6(p0) :: (store 8 into %ir.result)
+    G_STORE %20(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+...
+---
+name:            long_chain_i64_in_gpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 24, size: 4, alignment: 8, isImmutable: true }
+  - { id: 1, offset: 20, size: 4, alignment: 4, isImmutable: true }
+  - { id: 2, offset: 16, size: 4, alignment: 8, isImmutable: true }
+body:             |
+  ; MIPS32-LABEL: name: long_chain_i64_in_gpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.8(0x40000000), %bb.1(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; MIPS32:   [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
+  ; MIPS32:   [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
+  ; MIPS32:   [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; MIPS32:   [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
+  ; MIPS32:   [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 0
+  ; MIPS32:   [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.8
+  ; MIPS32: bb.1.pre.PHI.1:
+  ; MIPS32:   successors: %bb.4(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+  ; MIPS32:   G_BRCOND [[AND1]](s32), %bb.4
+  ; MIPS32: bb.2.pre.PHI.1.0:
+  ; MIPS32:   successors: %bb.5(0x40000000), %bb.3(0x40000000)
+  ; MIPS32:   [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+  ; MIPS32:   G_BRCOND [[AND2]](s32), %bb.5
+  ; MIPS32: bb.3.b.PHI.1.0:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD3:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load 4 from %ir.a, align 8)
+  ; MIPS32:   [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP:%[0-9]+]]:gprb(p0) = G_GEP [[COPY3]], [[C4]](s32)
+  ; MIPS32:   [[LOAD4:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP]](p0) :: (load 4 from %ir.a + 4, align 8)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.4.b.PHI.1.1:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD5:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.b, align 8)
+  ; MIPS32:   [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP1:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD]], [[C5]](s32)
+  ; MIPS32:   [[LOAD6:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP1]](p0) :: (load 4 from %ir.b + 4, align 8)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.5.b.PHI.1.2:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD7:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD1]](p0) :: (load 4 from %ir.c, align 8)
+  ; MIPS32:   [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP2:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD1]], [[C6]](s32)
+  ; MIPS32:   [[LOAD8:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP2]](p0) :: (load 4 from %ir.c + 4, align 8)
+  ; MIPS32: bb.6.b.PHI.1:
+  ; MIPS32:   successors: %bb.7(0x40000000), %bb.13(0x40000000)
+  ; MIPS32:   [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD3]](s32), %bb.3, [[LOAD5]](s32), %bb.4, [[LOAD7]](s32), %bb.5
+  ; MIPS32:   [[PHI1:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD4]](s32), %bb.3, [[LOAD6]](s32), %bb.4, [[LOAD8]](s32), %bb.5
+  ; MIPS32:   [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C7]]
+  ; MIPS32:   G_BRCOND [[AND3]](s32), %bb.7
+  ; MIPS32:   G_BR %bb.13
+  ; MIPS32: bb.7.b.PHI.1.end:
+  ; MIPS32:   G_STORE [[PHI]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
+  ; MIPS32:   [[C8:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP3:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD2]], [[C8]](s32)
+  ; MIPS32:   G_STORE [[PHI1]](s32), [[GEP3]](p0) :: (store 4 into %ir.result + 4, align 8)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.8.pre.PHI.2:
+  ; MIPS32:   successors: %bb.9(0x40000000), %bb.10(0x40000000)
+  ; MIPS32:   [[C9:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C9]]
+  ; MIPS32:   G_BRCOND [[AND4]](s32), %bb.9
+  ; MIPS32:   G_BR %bb.10
+  ; MIPS32: bb.9.b.PHI.2.0:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD9:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY3]](p0) :: (load 4 from %ir.a, align 8)
+  ; MIPS32:   [[C10:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP4:%[0-9]+]]:gprb(p0) = G_GEP [[COPY3]], [[C10]](s32)
+  ; MIPS32:   [[LOAD10:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP4]](p0) :: (load 4 from %ir.a + 4, align 8)
+  ; MIPS32:   G_BR %bb.11
+  ; MIPS32: bb.10.b.PHI.2.1:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD11:%[0-9]+]]:gprb(s32) = G_LOAD [[LOAD]](p0) :: (load 4 from %ir.b, align 8)
+  ; MIPS32:   [[C11:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP5:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD]], [[C11]](s32)
+  ; MIPS32:   [[LOAD12:%[0-9]+]]:gprb(s32) = G_LOAD [[GEP5]](p0) :: (load 4 from %ir.b + 4, align 8)
+  ; MIPS32: bb.11.b.PHI.2:
+  ; MIPS32:   successors: %bb.13(0x40000000), %bb.12(0x40000000)
+  ; MIPS32:   [[PHI2:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD9]](s32), %bb.9, [[LOAD11]](s32), %bb.10
+  ; MIPS32:   [[PHI3:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD10]](s32), %bb.9, [[LOAD12]](s32), %bb.10
+  ; MIPS32:   [[C12:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C12]]
+  ; MIPS32:   G_BRCOND [[AND5]](s32), %bb.13
+  ; MIPS32: bb.12.b.PHI.2.end:
+  ; MIPS32:   G_STORE [[PHI2]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
+  ; MIPS32:   [[C13:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP6:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD2]], [[C13]](s32)
+  ; MIPS32:   G_STORE [[PHI3]](s32), [[GEP6]](p0) :: (store 4 into %ir.result + 4, align 8)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.13.b.PHI.3:
+  ; MIPS32:   [[PHI4:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[PHI]](s32), %bb.6
+  ; MIPS32:   [[PHI5:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[PHI1]](s32), %bb.6
+  ; MIPS32:   [[PHI6:%[0-9]+]]:gprb(s32) = G_PHI [[PHI2]](s32), %bb.11, [[C]](s32), %bb.6
+  ; MIPS32:   [[PHI7:%[0-9]+]]:gprb(s32) = G_PHI [[PHI3]](s32), %bb.11, [[C]](s32), %bb.6
+  ; MIPS32:   [[C14:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C14]]
+  ; MIPS32:   [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI4]], [[PHI6]]
+  ; MIPS32:   [[SELECT1:%[0-9]+]]:gprb(s32) = G_SELECT [[AND6]](s32), [[PHI5]], [[PHI7]]
+  ; MIPS32:   [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C14]]
+  ; MIPS32:   [[SELECT2:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI4]]
+  ; MIPS32:   [[SELECT3:%[0-9]+]]:gprb(s32) = G_SELECT [[AND7]](s32), [[SELECT1]], [[PHI5]]
+  ; MIPS32:   G_STORE [[SELECT2]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
+  ; MIPS32:   [[C15:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP7:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD2]], [[C15]](s32)
+  ; MIPS32:   G_STORE [[SELECT3]](s32), [[GEP7]](p0) :: (store 4 into %ir.result + 4, align 8)
+  ; MIPS32:   G_STORE [[PHI4]](s32), [[LOAD2]](p0) :: (store 4 into %ir.result, align 8)
+  ; MIPS32:   [[C16:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 4
+  ; MIPS32:   [[GEP8:%[0-9]+]]:gprb(p0) = G_GEP [[LOAD2]], [[C16]](s32)
+  ; MIPS32:   G_STORE [[PHI5]](s32), [[GEP8]](p0) :: (store 4 into %ir.result + 4, align 8)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %7:_(s32) = COPY $a0
+    %8:_(s32) = COPY $a1
+    %9:_(s32) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %10:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    %4:_(p0) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.2, align 8)
+    %11:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    %5:_(p0) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.1)
+    %12:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %6:_(p0) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.0, align 8)
+    %33:_(s32) = G_CONSTANT i32 0
+    %24:_(s64) = G_MERGE_VALUES %33(s32), %33(s32)
+    %34:_(s32) = G_CONSTANT i32 1
+    %35:_(s32) = COPY %7(s32)
+    %32:_(s32) = G_AND %35, %34
+    G_BRCOND %32(s32), %bb.9
+
+  bb.2.pre.PHI.1:
+    %36:_(s32) = G_CONSTANT i32 1
+    %37:_(s32) = COPY %8(s32)
+    %31:_(s32) = G_AND %37, %36
+    G_BRCOND %31(s32), %bb.5
+
+  bb.3.pre.PHI.1.0:
+    %38:_(s32) = G_CONSTANT i32 1
+    %39:_(s32) = COPY %9(s32)
+    %30:_(s32) = G_AND %39, %38
+    G_BRCOND %30(s32), %bb.6
+
+  bb.4.b.PHI.1.0:
+    %13:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.7
+
+  bb.5.b.PHI.1.1:
+    %15:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+    G_BR %bb.7
+
+  bb.6.b.PHI.1.2:
+    %14:_(s64) = G_LOAD %5(p0) :: (load 8 from %ir.c)
+
+  bb.7.b.PHI.1:
+    %16:_(s64) = G_PHI %13(s64), %bb.4, %15(s64), %bb.5, %14(s64), %bb.6
+    %40:_(s32) = G_CONSTANT i32 1
+    %41:_(s32) = COPY %9(s32)
+    %29:_(s32) = G_AND %41, %40
+    G_BRCOND %29(s32), %bb.8
+    G_BR %bb.14
+
+  bb.8.b.PHI.1.end:
+    G_STORE %16(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.9.pre.PHI.2:
+    %42:_(s32) = G_CONSTANT i32 1
+    %43:_(s32) = COPY %7(s32)
+    %28:_(s32) = G_AND %43, %42
+    G_BRCOND %28(s32), %bb.10
+    G_BR %bb.11
+
+  bb.10.b.PHI.2.0:
+    %18:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.12
+
+  bb.11.b.PHI.2.1:
+    %17:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+
+  bb.12.b.PHI.2:
+    %19:_(s64) = G_PHI %18(s64), %bb.10, %17(s64), %bb.11
+    %44:_(s32) = G_CONSTANT i32 1
+    %45:_(s32) = COPY %8(s32)
+    %27:_(s32) = G_AND %45, %44
+    G_BRCOND %27(s32), %bb.14
+
+  bb.13.b.PHI.2.end:
+    G_STORE %19(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.14.b.PHI.3:
+    %20:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %21:_(s64) = G_PHI %19(s64), %bb.12, %24(s64), %bb.7
+    %46:_(s32) = G_CONSTANT i32 1
+    %47:_(s32) = COPY %9(s32)
+    %26:_(s32) = G_AND %47, %46
+    %22:_(s64) = G_SELECT %26(s32), %20, %21
+    %48:_(s32) = COPY %8(s32)
+    %25:_(s32) = G_AND %48, %46
+    %23:_(s64) = G_SELECT %25(s32), %22, %20
+    G_STORE %23(s64), %6(p0) :: (store 8 into %ir.result)
+    G_STORE %20(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+...
+---
+name:            long_chain_ambiguous_double_in_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 24, size: 4, alignment: 8, isImmutable: true }
+  - { id: 1, offset: 20, size: 4, alignment: 4, isImmutable: true }
+  - { id: 2, offset: 16, size: 4, alignment: 8, isImmutable: true }
+body:             |
+  ; MIPS32-LABEL: name: long_chain_ambiguous_double_in_fpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.8(0x40000000), %bb.1(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; MIPS32:   [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
+  ; MIPS32:   [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
+  ; MIPS32:   [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; MIPS32:   [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
+  ; MIPS32:   [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.8
+  ; MIPS32: bb.1.pre.PHI.1:
+  ; MIPS32:   successors: %bb.4(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C1]]
+  ; MIPS32:   G_BRCOND [[AND1]](s32), %bb.4
+  ; MIPS32: bb.2.pre.PHI.1.0:
+  ; MIPS32:   successors: %bb.5(0x40000000), %bb.3(0x40000000)
+  ; MIPS32:   [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C2]]
+  ; MIPS32:   G_BRCOND [[AND2]](s32), %bb.5
+  ; MIPS32: bb.3.b.PHI.1.0:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.4.b.PHI.1.1:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.5.b.PHI.1.2:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.c)
+  ; MIPS32: bb.6.b.PHI.1:
+  ; MIPS32:   successors: %bb.7(0x40000000), %bb.13(0x40000000)
+  ; MIPS32:   [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+  ; MIPS32:   [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C3]]
+  ; MIPS32:   G_BRCOND [[AND3]](s32), %bb.7
+  ; MIPS32:   G_BR %bb.13
+  ; MIPS32: bb.7.b.PHI.1.end:
+  ; MIPS32:   G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.8.pre.PHI.2:
+  ; MIPS32:   successors: %bb.9(0x40000000), %bb.10(0x40000000)
+  ; MIPS32:   [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C4]]
+  ; MIPS32:   G_BRCOND [[AND4]](s32), %bb.9
+  ; MIPS32:   G_BR %bb.10
+  ; MIPS32: bb.9.b.PHI.2.0:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.11
+  ; MIPS32: bb.10.b.PHI.2.1:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32: bb.11.b.PHI.2:
+  ; MIPS32:   successors: %bb.13(0x40000000), %bb.12(0x40000000)
+  ; MIPS32:   [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+  ; MIPS32:   [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C5]]
+  ; MIPS32:   G_BRCOND [[AND5]](s32), %bb.13
+  ; MIPS32: bb.12.b.PHI.2.end:
+  ; MIPS32:   G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.13.b.PHI.3:
+  ; MIPS32:   [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C6]]
+  ; MIPS32:   [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+  ; MIPS32:   [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C6]]
+  ; MIPS32:   [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+  ; MIPS32:   G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %7:_(s32) = COPY $a0
+    %8:_(s32) = COPY $a1
+    %9:_(s32) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %10:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    %4:_(p0) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.2, align 8)
+    %11:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    %5:_(p0) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.1)
+    %12:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %6:_(p0) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.0, align 8)
+    %32:_(s32) = G_CONSTANT i32 1
+    %33:_(s32) = COPY %7(s32)
+    %31:_(s32) = G_AND %33, %32
+    G_BRCOND %31(s32), %bb.9
+
+  bb.2.pre.PHI.1:
+    %34:_(s32) = G_CONSTANT i32 1
+    %35:_(s32) = COPY %8(s32)
+    %30:_(s32) = G_AND %35, %34
+    G_BRCOND %30(s32), %bb.5
+
+  bb.3.pre.PHI.1.0:
+    %36:_(s32) = G_CONSTANT i32 1
+    %37:_(s32) = COPY %9(s32)
+    %29:_(s32) = G_AND %37, %36
+    G_BRCOND %29(s32), %bb.6
+
+  bb.4.b.PHI.1.0:
+    %13:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.7
+
+  bb.5.b.PHI.1.1:
+    %15:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+    G_BR %bb.7
+
+  bb.6.b.PHI.1.2:
+    %14:_(s64) = G_LOAD %5(p0) :: (load 8 from %ir.c)
+
+  bb.7.b.PHI.1:
+    %16:_(s64) = G_PHI %13(s64), %bb.4, %15(s64), %bb.5, %14(s64), %bb.6
+    %38:_(s32) = G_CONSTANT i32 1
+    %39:_(s32) = COPY %9(s32)
+    %28:_(s32) = G_AND %39, %38
+    G_BRCOND %28(s32), %bb.8
+    G_BR %bb.14
+
+  bb.8.b.PHI.1.end:
+    G_STORE %16(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.9.pre.PHI.2:
+    %40:_(s32) = G_CONSTANT i32 1
+    %41:_(s32) = COPY %7(s32)
+    %27:_(s32) = G_AND %41, %40
+    G_BRCOND %27(s32), %bb.10
+    G_BR %bb.11
+
+  bb.10.b.PHI.2.0:
+    %18:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.12
+
+  bb.11.b.PHI.2.1:
+    %17:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+
+  bb.12.b.PHI.2:
+    %19:_(s64) = G_PHI %18(s64), %bb.10, %17(s64), %bb.11
+    %42:_(s32) = G_CONSTANT i32 1
+    %43:_(s32) = COPY %8(s32)
+    %26:_(s32) = G_AND %43, %42
+    G_BRCOND %26(s32), %bb.14
+
+  bb.13.b.PHI.2.end:
+    G_STORE %19(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.14.b.PHI.3:
+    %20:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %21:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %44:_(s32) = G_CONSTANT i32 1
+    %45:_(s32) = COPY %9(s32)
+    %25:_(s32) = G_AND %45, %44
+    %22:_(s64) = G_SELECT %25(s32), %20, %21
+    %46:_(s32) = COPY %8(s32)
+    %24:_(s32) = G_AND %46, %44
+    %23:_(s64) = G_SELECT %24(s32), %22, %20
+    G_STORE %23(s64), %6(p0) :: (store 8 into %ir.result)
+    G_STORE %20(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+...
+---
+name:            long_chain_double_in_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+fixedStack:
+  - { id: 0, offset: 24, size: 4, alignment: 8, isImmutable: true }
+  - { id: 1, offset: 20, size: 4, alignment: 4, isImmutable: true }
+  - { id: 2, offset: 16, size: 4, alignment: 8, isImmutable: true }
+body:             |
+  ; MIPS32-LABEL: name: long_chain_double_in_fpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.8(0x40000000), %bb.1(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(s32) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[FRAME_INDEX:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; MIPS32:   [[LOAD:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX]](p0) :: (load 4 from %fixed-stack.0, align 8)
+  ; MIPS32:   [[FRAME_INDEX1:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX1]](p0) :: (load 4 from %fixed-stack.1)
+  ; MIPS32:   [[FRAME_INDEX2:%[0-9]+]]:gprb(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; MIPS32:   [[LOAD2:%[0-9]+]]:gprb(p0) = G_LOAD [[FRAME_INDEX2]](p0) :: (load 4 from %fixed-stack.2, align 8)
+  ; MIPS32:   [[C:%[0-9]+]]:fprb(s64) = G_FCONSTANT double 0.000000e+00
+  ; MIPS32:   [[C1:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C1]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.8
+  ; MIPS32: bb.1.pre.PHI.1:
+  ; MIPS32:   successors: %bb.4(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   [[C2:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY5:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND1:%[0-9]+]]:gprb(s32) = G_AND [[COPY5]], [[C2]]
+  ; MIPS32:   G_BRCOND [[AND1]](s32), %bb.4
+  ; MIPS32: bb.2.pre.PHI.1.0:
+  ; MIPS32:   successors: %bb.5(0x40000000), %bb.3(0x40000000)
+  ; MIPS32:   [[C3:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY6:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND2:%[0-9]+]]:gprb(s32) = G_AND [[COPY6]], [[C3]]
+  ; MIPS32:   G_BRCOND [[AND2]](s32), %bb.5
+  ; MIPS32: bb.3.b.PHI.1.0:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD3:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.4.b.PHI.1.1:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD4:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32:   G_BR %bb.6
+  ; MIPS32: bb.5.b.PHI.1.2:
+  ; MIPS32:   successors: %bb.6(0x80000000)
+  ; MIPS32:   [[LOAD5:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD1]](p0) :: (load 8 from %ir.c)
+  ; MIPS32: bb.6.b.PHI.1:
+  ; MIPS32:   successors: %bb.7(0x40000000), %bb.13(0x40000000)
+  ; MIPS32:   [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD3]](s64), %bb.3, [[LOAD4]](s64), %bb.4, [[LOAD5]](s64), %bb.5
+  ; MIPS32:   [[C4:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY7:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND3:%[0-9]+]]:gprb(s32) = G_AND [[COPY7]], [[C4]]
+  ; MIPS32:   G_BRCOND [[AND3]](s32), %bb.7
+  ; MIPS32:   G_BR %bb.13
+  ; MIPS32: bb.7.b.PHI.1.end:
+  ; MIPS32:   G_STORE [[PHI]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.8.pre.PHI.2:
+  ; MIPS32:   successors: %bb.9(0x40000000), %bb.10(0x40000000)
+  ; MIPS32:   [[C5:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY8:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND4:%[0-9]+]]:gprb(s32) = G_AND [[COPY8]], [[C5]]
+  ; MIPS32:   G_BRCOND [[AND4]](s32), %bb.9
+  ; MIPS32:   G_BR %bb.10
+  ; MIPS32: bb.9.b.PHI.2.0:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD6:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY3]](p0) :: (load 8 from %ir.a)
+  ; MIPS32:   G_BR %bb.11
+  ; MIPS32: bb.10.b.PHI.2.1:
+  ; MIPS32:   successors: %bb.11(0x80000000)
+  ; MIPS32:   [[LOAD7:%[0-9]+]]:fprb(s64) = G_LOAD [[LOAD]](p0) :: (load 8 from %ir.b)
+  ; MIPS32: bb.11.b.PHI.2:
+  ; MIPS32:   successors: %bb.13(0x40000000), %bb.12(0x40000000)
+  ; MIPS32:   [[PHI1:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD6]](s64), %bb.9, [[LOAD7]](s64), %bb.10
+  ; MIPS32:   [[C6:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY9:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND5:%[0-9]+]]:gprb(s32) = G_AND [[COPY9]], [[C6]]
+  ; MIPS32:   G_BRCOND [[AND5]](s32), %bb.13
+  ; MIPS32: bb.12.b.PHI.2.end:
+  ; MIPS32:   G_STORE [[PHI1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  ; MIPS32: bb.13.b.PHI.3:
+  ; MIPS32:   [[PHI2:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[PHI]](s64), %bb.6
+  ; MIPS32:   [[PHI3:%[0-9]+]]:fprb(s64) = G_PHI [[PHI1]](s64), %bb.11, [[C]](s64), %bb.6
+  ; MIPS32:   [[C7:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY10:%[0-9]+]]:gprb(s32) = COPY [[COPY2]](s32)
+  ; MIPS32:   [[AND6:%[0-9]+]]:gprb(s32) = G_AND [[COPY10]], [[C7]]
+  ; MIPS32:   [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND6]](s32), [[PHI2]], [[PHI3]]
+  ; MIPS32:   [[COPY11:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+  ; MIPS32:   [[AND7:%[0-9]+]]:gprb(s32) = G_AND [[COPY11]], [[C7]]
+  ; MIPS32:   [[SELECT1:%[0-9]+]]:fprb(s64) = G_SELECT [[AND7]](s32), [[SELECT]], [[PHI2]]
+  ; MIPS32:   G_STORE [[SELECT1]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   G_STORE [[PHI2]](s64), [[LOAD2]](p0) :: (store 8 into %ir.result)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %7:_(s32) = COPY $a0
+    %8:_(s32) = COPY $a1
+    %9:_(s32) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %10:_(p0) = G_FRAME_INDEX %fixed-stack.2
+    %4:_(p0) = G_LOAD %10(p0) :: (load 4 from %fixed-stack.2, align 8)
+    %11:_(p0) = G_FRAME_INDEX %fixed-stack.1
+    %5:_(p0) = G_LOAD %11(p0) :: (load 4 from %fixed-stack.1)
+    %12:_(p0) = G_FRAME_INDEX %fixed-stack.0
+    %6:_(p0) = G_LOAD %12(p0) :: (load 4 from %fixed-stack.0, align 8)
+    %24:_(s64) = G_FCONSTANT double 0.000000e+00
+    %33:_(s32) = G_CONSTANT i32 1
+    %34:_(s32) = COPY %7(s32)
+    %32:_(s32) = G_AND %34, %33
+    G_BRCOND %32(s32), %bb.9
+
+  bb.2.pre.PHI.1:
+    %35:_(s32) = G_CONSTANT i32 1
+    %36:_(s32) = COPY %8(s32)
+    %31:_(s32) = G_AND %36, %35
+    G_BRCOND %31(s32), %bb.5
+
+  bb.3.pre.PHI.1.0:
+    %37:_(s32) = G_CONSTANT i32 1
+    %38:_(s32) = COPY %9(s32)
+    %30:_(s32) = G_AND %38, %37
+    G_BRCOND %30(s32), %bb.6
+
+  bb.4.b.PHI.1.0:
+    %13:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.7
+
+  bb.5.b.PHI.1.1:
+    %15:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+    G_BR %bb.7
+
+  bb.6.b.PHI.1.2:
+    %14:_(s64) = G_LOAD %5(p0) :: (load 8 from %ir.c)
+
+  bb.7.b.PHI.1:
+    %16:_(s64) = G_PHI %13(s64), %bb.4, %15(s64), %bb.5, %14(s64), %bb.6
+    %39:_(s32) = G_CONSTANT i32 1
+    %40:_(s32) = COPY %9(s32)
+    %29:_(s32) = G_AND %40, %39
+    G_BRCOND %29(s32), %bb.8
+    G_BR %bb.14
+
+  bb.8.b.PHI.1.end:
+    G_STORE %16(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.9.pre.PHI.2:
+    %41:_(s32) = G_CONSTANT i32 1
+    %42:_(s32) = COPY %7(s32)
+    %28:_(s32) = G_AND %42, %41
+    G_BRCOND %28(s32), %bb.10
+    G_BR %bb.11
+
+  bb.10.b.PHI.2.0:
+    %18:_(s64) = G_LOAD %3(p0) :: (load 8 from %ir.a)
+    G_BR %bb.12
+
+  bb.11.b.PHI.2.1:
+    %17:_(s64) = G_LOAD %4(p0) :: (load 8 from %ir.b)
+
+  bb.12.b.PHI.2:
+    %19:_(s64) = G_PHI %18(s64), %bb.10, %17(s64), %bb.11
+    %43:_(s32) = G_CONSTANT i32 1
+    %44:_(s32) = COPY %8(s32)
+    %27:_(s32) = G_AND %44, %43
+    G_BRCOND %27(s32), %bb.14
+
+  bb.13.b.PHI.2.end:
+    G_STORE %19(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+  bb.14.b.PHI.3:
+    %20:_(s64) = G_PHI %19(s64), %bb.12, %16(s64), %bb.7
+    %21:_(s64) = G_PHI %19(s64), %bb.12, %24(s64), %bb.7
+    %45:_(s32) = G_CONSTANT i32 1
+    %46:_(s32) = COPY %9(s32)
+    %26:_(s32) = G_AND %46, %45
+    %22:_(s64) = G_SELECT %26(s32), %20, %21
+    %47:_(s32) = COPY %8(s32)
+    %25:_(s32) = G_AND %47, %45
+    %23:_(s64) = G_SELECT %25(s32), %22, %20
+    G_STORE %23(s64), %6(p0) :: (store 8 into %ir.result)
+    G_STORE %20(s64), %6(p0) :: (store 8 into %ir.result)
+    RetRA
+
+...

Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir?rev=365743&r1=365742&r2=365743&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/phi.mir Thu Jul 11 02:22:49 2019
@@ -32,6 +32,24 @@
     ret i64 %cond
   }
 
+  define void @phi_ambiguous_i64_in_fpr(i1 %cnd, i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {
+  entry:
+    %0 = load i64, i64* %i64_ptr_a, align 4
+    %1 = load i64, i64* %i64_ptr_b, align 4
+    br i1 %cnd, label %cond.true, label %cond.false
+
+  cond.true:                                        ; preds = %entry
+    br label %cond.end
+
+  cond.false:                                       ; preds = %entry
+    br label %cond.end
+
+  cond.end:                                         ; preds = %cond.false, %cond.true
+    %cond = phi i64 [ %0, %cond.true ], [ %1, %cond.false ]
+    store i64 %cond, i64* %i64_ptr_c, align 4
+    ret void
+  }
+
   define float @phi_float(i1 %cnd, float %a, float %b) {
   entry:
     br i1 %cnd, label %cond.true, label %cond.false
@@ -47,6 +65,24 @@
     ret float %cond
   }
 
+  define void @phi_ambiguous_float_in_gpr(i1 %cnd, float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {
+  entry:
+    %0 = load float, float* %f32_ptr_a, align 4
+    %1 = load float, float* %f32_ptr_b, align 4
+    br i1 %cnd, label %cond.true, label %cond.false
+
+  cond.true:                                        ; preds = %entry
+    br label %cond.end
+
+  cond.false:                                       ; preds = %entry
+    br label %cond.end
+
+  cond.end:                                         ; preds = %cond.false, %cond.true
+    %cond = phi float [ %0, %cond.true ], [ %1, %cond.false ]
+    store float %cond, float* %f32_ptr_c, align 4
+    ret void
+  }
+
   define double @phi_double(double %a, double %b, i1 %cnd) {
   entry:
     br i1 %cnd, label %cond.true, label %cond.false
@@ -181,6 +217,62 @@ body:             |
 
 ...
 ---
+name:            phi_ambiguous_i64_in_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  ; MIPS32-LABEL: name: phi_ambiguous_i64_in_fpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY1]](p0) :: (load 8 from %ir.i64_ptr_a, align 4)
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY2]](p0) :: (load 8 from %ir.i64_ptr_b, align 4)
+  ; MIPS32:   [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.1
+  ; MIPS32:   G_BR %bb.2
+  ; MIPS32: bb.1.cond.true:
+  ; MIPS32:   successors: %bb.3(0x80000000)
+  ; MIPS32:   G_BR %bb.3
+  ; MIPS32: bb.2.cond.false:
+  ; MIPS32:   successors: %bb.3(0x80000000)
+  ; MIPS32: bb.3.cond.end:
+  ; MIPS32:   [[PHI:%[0-9]+]]:fprb(s64) = G_PHI [[LOAD]](s64), %bb.1, [[LOAD1]](s64), %bb.2
+  ; MIPS32:   G_STORE [[PHI]](s64), [[COPY3]](p0) :: (store 8 into %ir.i64_ptr_c, align 4)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %4:_(s32) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %5:_(s64) = G_LOAD %1(p0) :: (load 8 from %ir.i64_ptr_a, align 4)
+    %6:_(s64) = G_LOAD %2(p0) :: (load 8 from %ir.i64_ptr_b, align 4)
+    %9:_(s32) = G_CONSTANT i32 1
+    %10:_(s32) = COPY %4(s32)
+    %8:_(s32) = G_AND %10, %9
+    G_BRCOND %8(s32), %bb.2
+    G_BR %bb.3
+
+  bb.2.cond.true:
+    G_BR %bb.4
+
+  bb.3.cond.false:
+
+  bb.4.cond.end:
+    %7:_(s64) = G_PHI %5(s64), %bb.2, %6(s64), %bb.3
+    G_STORE %7(s64), %3(p0) :: (store 8 into %ir.i64_ptr_c, align 4)
+    RetRA
+
+...
+---
 name:            phi_float
 alignment:       2
 legalized:       true
@@ -231,6 +323,62 @@ body:             |
 
 ...
 ---
+name:            phi_ambiguous_float_in_gpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  ; MIPS32-LABEL: name: phi_ambiguous_float_in_gpr
+  ; MIPS32: bb.0.entry:
+  ; MIPS32:   successors: %bb.1(0x40000000), %bb.2(0x40000000)
+  ; MIPS32:   liveins: $a0, $a1, $a2, $a3
+  ; MIPS32:   [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+  ; MIPS32:   [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+  ; MIPS32:   [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+  ; MIPS32:   [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+  ; MIPS32:   [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY1]](p0) :: (load 4 from %ir.f32_ptr_a)
+  ; MIPS32:   [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.f32_ptr_b)
+  ; MIPS32:   [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+  ; MIPS32:   [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+  ; MIPS32:   [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+  ; MIPS32:   G_BRCOND [[AND]](s32), %bb.1
+  ; MIPS32:   G_BR %bb.2
+  ; MIPS32: bb.1.cond.true:
+  ; MIPS32:   successors: %bb.3(0x80000000)
+  ; MIPS32:   G_BR %bb.3
+  ; MIPS32: bb.2.cond.false:
+  ; MIPS32:   successors: %bb.3(0x80000000)
+  ; MIPS32: bb.3.cond.end:
+  ; MIPS32:   [[PHI:%[0-9]+]]:gprb(s32) = G_PHI [[LOAD]](s32), %bb.1, [[LOAD1]](s32), %bb.2
+  ; MIPS32:   G_STORE [[PHI]](s32), [[COPY3]](p0) :: (store 4 into %ir.f32_ptr_c)
+  ; MIPS32:   RetRA
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    %4:_(s32) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %5:_(s32) = G_LOAD %1(p0) :: (load 4 from %ir.f32_ptr_a)
+    %6:_(s32) = G_LOAD %2(p0) :: (load 4 from %ir.f32_ptr_b)
+    %9:_(s32) = G_CONSTANT i32 1
+    %10:_(s32) = COPY %4(s32)
+    %8:_(s32) = G_AND %10, %9
+    G_BRCOND %8(s32), %bb.2
+    G_BR %bb.3
+
+  bb.2.cond.true:
+    G_BR %bb.4
+
+  bb.3.cond.false:
+
+  bb.4.cond.end:
+    %7:_(s32) = G_PHI %5(s32), %bb.2, %6(s32), %bb.3
+    G_STORE %7(s32), %3(p0) :: (store 4 into %ir.f32_ptr_c)
+    RetRA
+
+...
+---
 name:            phi_double
 alignment:       2
 legalized:       true

Modified: llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir?rev=365743&r1=365742&r2=365743&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir (original)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/select.mir Thu Jul 11 02:22:49 2019
@@ -5,7 +5,9 @@
   define void @select_i32(i32, i32) {entry: ret void}
   define void @select_ptr(i32, i32) {entry: ret void}
   define void @select_i64() {entry: ret void}
+  define void @select_ambiguous_i64_in_fpr(i64* %i64_ptr_a, i64* %i64_ptr_b, i64* %i64_ptr_c) {entry: ret void}
   define void @select_float() {entry: ret void}
+  define void @select_ambiguous_float_in_gpr(float* %f32_ptr_a, float* %f32_ptr_b, float* %f32_ptr_c) {entry: ret void}
   define void @select_double() {entry: ret void}
 
 ...
@@ -120,6 +122,43 @@ body:             |
 
 ...
 ---
+name:            select_ambiguous_i64_in_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: select_ambiguous_i64_in_fpr
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+    ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY1]](p0) :: (load 8 from %ir.i64_ptr_a)
+    ; MIPS32: [[LOAD1:%[0-9]+]]:fprb(s64) = G_LOAD [[COPY2]](p0) :: (load 8 from %ir.i64_ptr_b)
+    ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s64) = G_SELECT [[AND]](s32), [[LOAD]], [[LOAD1]]
+    ; MIPS32: G_STORE [[SELECT]](s64), [[COPY3]](p0) :: (store 8 into %ir.i64_ptr_c)
+    ; MIPS32: RetRA
+    %4:_(s32) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %5:_(s64) = G_LOAD %1(p0) :: (load 8 from %ir.i64_ptr_a)
+    %6:_(s64) = G_LOAD %2(p0) :: (load 8 from %ir.i64_ptr_b)
+    %9:_(s32) = G_CONSTANT i32 1
+    %10:_(s32) = COPY %4(s32)
+    %8:_(s32) = G_AND %10, %9
+    %7:_(s64) = G_SELECT %8(s32), %5, %6
+    G_STORE %7(s64), %3(p0) :: (store 8 into %ir.i64_ptr_c)
+    RetRA
+
+...
+---
 name:            select_float
 alignment:       2
 legalized:       true
@@ -151,6 +190,43 @@ body:             |
 
 ...
 ---
+name:            select_ambiguous_float_in_gpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: select_ambiguous_float_in_gpr
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:gprb(p0) = COPY $a3
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY1]](p0) :: (load 4 from %ir.f32_ptr_a)
+    ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.f32_ptr_b)
+    ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND]](s32), [[LOAD]], [[LOAD1]]
+    ; MIPS32: G_STORE [[SELECT]](s32), [[COPY3]](p0) :: (store 4 into %ir.f32_ptr_c)
+    ; MIPS32: RetRA
+    %4:_(s32) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %3:_(p0) = COPY $a3
+    %5:_(s32) = G_LOAD %1(p0) :: (load 4 from %ir.f32_ptr_a)
+    %6:_(s32) = G_LOAD %2(p0) :: (load 4 from %ir.f32_ptr_b)
+    %9:_(s32) = G_CONSTANT i32 1
+    %10:_(s32) = COPY %4(s32)
+    %8:_(s32) = G_AND %10, %9
+    %7:_(s32) = G_SELECT %8(s32), %5, %6
+    G_STORE %7(s32), %3(p0) :: (store 4 into %ir.f32_ptr_c)
+    RetRA
+
+...
+---
 name:            select_double
 alignment:       2
 legalized:       true

Added: llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir?rev=365743&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir (added)
+++ llvm/trunk/test/CodeGen/Mips/GlobalISel/regbankselect/test_TypeInfoforMF.mir Thu Jul 11 02:22:49 2019
@@ -0,0 +1,250 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -O0 -mtriple=mipsel-linux-gnu -run-pass=regbankselect -verify-machineinstrs %s -o - | FileCheck %s -check-prefixes=MIPS32
+--- |
+
+  define void @outgoing_gpr(i32* %i32_ptr) {entry: ret void}
+  define void @outgoing_fpr(float* %float_ptr) {entry: ret void}
+  define void @outgoing_gpr_instr(i32* %i32_ptr1, i32* %i32_ptr2) {entry: ret void}
+  define void @outgoing_fpr_instr(float* %float_ptr1, float* %float_ptr2) {entry: ret void}
+  define void @incoming_gpr(i32* %a) {entry: ret void}
+  define void @incoming_fpr(float* %a) {entry: ret void}
+  define void @incoming_i32_instr(i32* %i32_ptr) {entry: ret void}
+  define void @incoming_float_instr(float* %float_ptr) {entry: ret void}
+
+...
+---
+name:            outgoing_gpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0
+
+    ; MIPS32-LABEL: name: outgoing_gpr
+    ; MIPS32: liveins: $a0
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.i32_ptr)
+    ; MIPS32: $v0 = COPY [[LOAD]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(p0) = COPY $a0
+    %1:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.i32_ptr)
+    $v0 = COPY %1(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            outgoing_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0
+
+    ; MIPS32-LABEL: name: outgoing_fpr
+    ; MIPS32: liveins: $a0
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.float_ptr)
+    ; MIPS32: $f0 = COPY [[LOAD]](s32)
+    ; MIPS32: RetRA implicit $f0
+    %0:_(p0) = COPY $a0
+    %1:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.float_ptr)
+    $f0 = COPY %1(s32)
+    RetRA implicit $f0
+
+...
+---
+name:            outgoing_gpr_instr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: outgoing_gpr_instr
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.i32_ptr1)
+    ; MIPS32: [[LOAD1:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY1]](p0) :: (load 4 from %ir.i32_ptr2)
+    ; MIPS32: [[ADD:%[0-9]+]]:gprb(s32) = G_ADD [[LOAD1]], [[LOAD]]
+    ; MIPS32: $v0 = COPY [[ADD]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.i32_ptr1)
+    %3:_(s32) = G_LOAD %1(p0) :: (load 4 from %ir.i32_ptr2)
+    %4:_(s32) = G_ADD %3, %2
+    $v0 = COPY %4(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            outgoing_fpr_instr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1
+
+    ; MIPS32-LABEL: name: outgoing_fpr_instr
+    ; MIPS32: liveins: $a0, $a1
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(p0) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(p0) = COPY $a1
+    ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s32) = G_LOAD [[COPY]](p0) :: (load 4 from %ir.float_ptr1)
+    ; MIPS32: [[LOAD1:%[0-9]+]]:fprb(s32) = G_LOAD [[COPY1]](p0) :: (load 4 from %ir.float_ptr2)
+    ; MIPS32: [[FADD:%[0-9]+]]:fprb(s32) = G_FADD [[LOAD]], [[LOAD1]]
+    ; MIPS32: $f0 = COPY [[FADD]](s32)
+    ; MIPS32: RetRA implicit $f0
+    %0:_(p0) = COPY $a0
+    %1:_(p0) = COPY $a1
+    %2:_(s32) = G_LOAD %0(p0) :: (load 4 from %ir.float_ptr1)
+    %3:_(s32) = G_LOAD %1(p0) :: (load 4 from %ir.float_ptr2)
+    %4:_(s32) = G_FADD %2, %3
+    $f0 = COPY %4(s32)
+    RetRA implicit $f0
+
+...
+---
+name:            incoming_gpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2
+
+    ; MIPS32-LABEL: name: incoming_gpr
+    ; MIPS32: liveins: $a0, $a1, $a2
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.a)
+    ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY3]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND]](s32), [[LOAD]], [[COPY]]
+    ; MIPS32: $v0 = COPY [[SELECT]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %3:_(s32) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %4:_(s32) = G_LOAD %2(p0) :: (load 4 from %ir.a)
+    %7:_(s32) = G_CONSTANT i32 1
+    %8:_(s32) = COPY %3(s32)
+    %6:_(s32) = G_AND %8, %7
+    %5:_(s32) = G_SELECT %6(s32), %4, %0
+    $v0 = COPY %5(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            incoming_fpr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a1, $a2, $f12
+
+    ; MIPS32-LABEL: name: incoming_fpr
+    ; MIPS32: liveins: $a1, $a2, $f12
+    ; MIPS32: [[COPY:%[0-9]+]]:fprb(s32) = COPY $f12
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.a)
+    ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY [[COPY1]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY3]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s32) = G_SELECT [[AND]](s32), [[LOAD]], [[COPY]]
+    ; MIPS32: $f0 = COPY [[SELECT]](s32)
+    ; MIPS32: RetRA implicit $f0
+    %0:_(s32) = COPY $f12
+    %3:_(s32) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %4:_(s32) = G_LOAD %2(p0) :: (load 4 from %ir.a)
+    %7:_(s32) = G_CONSTANT i32 1
+    %8:_(s32) = COPY %3(s32)
+    %6:_(s32) = G_AND %8, %7
+    %5:_(s32) = G_SELECT %6(s32), %4, %0
+    $f0 = COPY %5(s32)
+    RetRA implicit $f0
+
+...
+---
+name:            incoming_i32_instr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a0, $a1, $a2, $a3
+
+    ; MIPS32-LABEL: name: incoming_i32_instr
+    ; MIPS32: liveins: $a0, $a1, $a2, $a3
+    ; MIPS32: [[COPY:%[0-9]+]]:gprb(s32) = COPY $a0
+    ; MIPS32: [[COPY1:%[0-9]+]]:gprb(s32) = COPY $a1
+    ; MIPS32: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY $a3
+    ; MIPS32: [[LOAD:%[0-9]+]]:gprb(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.i32_ptr)
+    ; MIPS32: [[ADD:%[0-9]+]]:gprb(s32) = G_ADD [[COPY1]], [[COPY]]
+    ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY3]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:gprb(s32) = G_SELECT [[AND]](s32), [[LOAD]], [[ADD]]
+    ; MIPS32: $v0 = COPY [[SELECT]](s32)
+    ; MIPS32: RetRA implicit $v0
+    %0:_(s32) = COPY $a0
+    %1:_(s32) = COPY $a1
+    %2:_(p0) = COPY $a2
+    %4:_(s32) = COPY $a3
+    %5:_(s32) = G_LOAD %2(p0) :: (load 4 from %ir.i32_ptr)
+    %6:_(s32) = G_ADD %1, %0
+    %9:_(s32) = G_CONSTANT i32 1
+    %10:_(s32) = COPY %4(s32)
+    %8:_(s32) = G_AND %10, %9
+    %7:_(s32) = G_SELECT %8(s32), %5, %6
+    $v0 = COPY %7(s32)
+    RetRA implicit $v0
+
+...
+---
+name:            incoming_float_instr
+alignment:       2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1.entry:
+    liveins: $a2, $a3, $f12, $f14
+
+    ; MIPS32-LABEL: name: incoming_float_instr
+    ; MIPS32: liveins: $a2, $a3, $f12, $f14
+    ; MIPS32: [[COPY:%[0-9]+]]:fprb(s32) = COPY $f12
+    ; MIPS32: [[COPY1:%[0-9]+]]:fprb(s32) = COPY $f14
+    ; MIPS32: [[COPY2:%[0-9]+]]:gprb(p0) = COPY $a2
+    ; MIPS32: [[COPY3:%[0-9]+]]:gprb(s32) = COPY $a3
+    ; MIPS32: [[LOAD:%[0-9]+]]:fprb(s32) = G_LOAD [[COPY2]](p0) :: (load 4 from %ir.float_ptr)
+    ; MIPS32: [[FADD:%[0-9]+]]:fprb(s32) = G_FADD [[COPY1]], [[COPY]]
+    ; MIPS32: [[C:%[0-9]+]]:gprb(s32) = G_CONSTANT i32 1
+    ; MIPS32: [[COPY4:%[0-9]+]]:gprb(s32) = COPY [[COPY3]](s32)
+    ; MIPS32: [[AND:%[0-9]+]]:gprb(s32) = G_AND [[COPY4]], [[C]]
+    ; MIPS32: [[SELECT:%[0-9]+]]:fprb(s32) = G_SELECT [[AND]](s32), [[LOAD]], [[FADD]]
+    ; MIPS32: $f0 = COPY [[SELECT]](s32)
+    ; MIPS32: RetRA implicit $f0
+    %0:_(s32) = COPY $f12
+    %1:_(s32) = COPY $f14
+    %2:_(p0) = COPY $a2
+    %4:_(s32) = COPY $a3
+    %5:_(s32) = G_LOAD %2(p0) :: (load 4 from %ir.float_ptr)
+    %6:_(s32) = G_FADD %1, %0
+    %9:_(s32) = G_CONSTANT i32 1
+    %10:_(s32) = COPY %4(s32)
+    %8:_(s32) = G_AND %10, %9
+    %7:_(s32) = G_SELECT %8(s32), %5, %6
+    $f0 = COPY %7(s32)
+    RetRA implicit $f0
+
+...
