[llvm] [CodeGen][Mips] Remove fp128 libcall list (PR #153798)

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 15 05:39:52 PDT 2025


https://github.com/nikic created https://github.com/llvm/llvm-project/pull/153798

Mips requires fp128 args/returns to be passed differently than i128. It handles this by inspecting the pre-legalization type. However, for soft float libcalls, the original type is currently not provided (it will look like an i128 call). To work around that, MIPS maintains a list of libcalls working on fp128.

This patch removes that list by providing the original, pre-softening type to calling convention lowering. This is done by carrying additional information in CallLoweringInfo, as we unfortunately do need both types (we want the un-softened type for OrigTy, but we need the softened type for the actual register assignment etc.)

This is in preparation for completely removing all the custom pre-analysis code in the Mips backend and replacing it with use of OrigTy.

>From 9080a4972b88acc8bb2d198fa148222217a38fcd Mon Sep 17 00:00:00 2001
From: Nikita Popov <npopov at redhat.com>
Date: Thu, 14 Aug 2025 09:58:20 +0200
Subject: [PATCH] [CodeGen][Mips] Remove fp128 libcall list

Mips requires fp128 args/returns to be passed differently than
i128. It handles this by inspecting the pre-legalization type.
However, for soft float libcalls, the original type is currently
not provided (it will look like an i128 call). To work around that,
MIPS maintains a list of libcalls working on fp128.

This patch removes that list by providing the original, pre-softening
type to calling convention lowering. This is done by carrying
additional information in CallLoweringInfo, as we unfortunately
do need both types (we want the un-softened type for OrigTy, but
we need the softened type for the actual register assignment etc.)

This is in preparation for completely removing all the custom
pre-analysis code in the Mips backend and replacing it with use
of OrigTy.
---
 llvm/include/llvm/CodeGen/TargetLowering.h    |  29 +-
 .../SelectionDAG/SelectionDAGBuilder.cpp      |  18 +-
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  24 +-
 llvm/lib/Target/Mips/MipsCCState.cpp          |  56 +---
 llvm/lib/Target/Mips/MipsCCState.h            |  43 ++-
 llvm/lib/Target/Mips/MipsCallLowering.cpp     |  35 +--
 llvm/lib/Target/Mips/MipsFastISel.cpp         |   4 +-
 llvm/lib/Target/Mips/MipsISelLowering.cpp     |   8 +-
 llvm/test/CodeGen/Mips/fmuladd-soft-float.ll  | 274 +++++++++---------
 9 files changed, 244 insertions(+), 247 deletions(-)

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index ed7495694cc70..bbd720dfc451d 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -301,6 +301,9 @@ class LLVM_ABI TargetLoweringBase {
   public:
     Value *Val = nullptr;
     SDValue Node = SDValue();
+    /// Original unlegalized argument type.
+    Type *OrigTy = nullptr;
+    /// Same as OrigTy, or partially legalized for soft float libcalls.
     Type *Ty = nullptr;
     bool IsSExt : 1;
     bool IsZExt : 1;
@@ -4672,6 +4675,9 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
   /// implementation.
   struct CallLoweringInfo {
     SDValue Chain;
+    /// Original unlegalized return type.
+    Type *OrigRetTy = nullptr;
+    /// Same as OrigRetTy, or partially legalized for soft float libcalls.
     Type *RetTy = nullptr;
     bool RetSExt           : 1;
     bool RetZExt           : 1;
@@ -4726,11 +4732,20 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
     // setCallee with target/module-specific attributes
     CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
                                    SDValue Target, ArgListTy &&ArgsList) {
+      return setLibCallee(CC, ResultType, ResultType, Target,
+                          std::move(ArgsList));
+    }
+
+    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
+                                   Type *OrigResultType, SDValue Target,
+                                   ArgListTy &&ArgsList) {
+      OrigRetTy = OrigResultType;
       RetTy = ResultType;
       Callee = Target;
       CallConv = CC;
       NumFixedArgs = ArgsList.size();
       Args = std::move(ArgsList);
+      initOrigArgTys();
 
       DAG.getTargetLoweringInfo().markLibCallAttributes(
           &(DAG.getMachineFunction()), CC, Args);
@@ -4740,7 +4755,7 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
     CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
                                 SDValue Target, ArgListTy &&ArgsList,
                                 AttributeSet ResultAttrs = {}) {
-      RetTy = ResultType;
+      RetTy = OrigRetTy = ResultType;
       IsInReg = ResultAttrs.hasAttribute(Attribute::InReg);
       RetSExt = ResultAttrs.hasAttribute(Attribute::SExt);
       RetZExt = ResultAttrs.hasAttribute(Attribute::ZExt);
@@ -4750,13 +4765,14 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
       CallConv = CC;
       NumFixedArgs = ArgsList.size();
       Args = std::move(ArgsList);
+      initOrigArgTys();
       return *this;
     }
 
     CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
                                 SDValue Target, ArgListTy &&ArgsList,
                                 const CallBase &Call) {
-      RetTy = ResultType;
+      RetTy = OrigRetTy = ResultType;
 
       IsInReg = Call.hasRetAttr(Attribute::InReg);
       DoesNotReturn =
@@ -4773,6 +4789,7 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
       CallConv = Call.getCallingConv();
       NumFixedArgs = FTy->getNumParams();
       Args = std::move(ArgsList);
+      initOrigArgTys();
 
       CB = &Call;
 
@@ -4852,6 +4869,14 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
     ArgListTy &getArgs() {
       return Args;
     }
+
+  private:
+    void initOrigArgTys() {
+      for (ArgListEntry &Arg : Args) {
+        if (!Arg.OrigTy)
+          Arg.OrigTy = Arg.Ty;
+      }
+    }
   };
 
   /// This structure is used to pass arguments to makeLibCall function.
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 2eaab02130699..52e7f0feffbf3 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -11031,6 +11031,12 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   for (Type *Ty : RetOrigTys)
     RetTys.push_back(getValueType(DL, Ty));
 
+  if (CLI.RetTy != CLI.OrigRetTy) {
+    assert(RetOrigTys.size() == 1 &&
+           "Only supported for non-aggregate returns");
+    RetOrigTys[0] = CLI.OrigRetTy;
+  }
+
   if (CLI.IsPostTypeLegalization) {
     // If we are lowering a libcall after legalization, split the return type.
     SmallVector<Type *, 4> OldRetOrigTys;
@@ -11076,7 +11082,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
     DemoteStackSlot = CLI.DAG.getFrameIndex(DemoteStackIdx, getFrameIndexTy(DL));
     ArgListEntry Entry;
     Entry.Node = DemoteStackSlot;
-    Entry.Ty = StackSlotPtrType;
+    Entry.Ty = Entry.OrigTy = StackSlotPtrType;
     Entry.IsSExt = false;
     Entry.IsZExt = false;
     Entry.IsInReg = false;
@@ -11093,7 +11099,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
     CLI.NumFixedArgs += 1;
     CLI.getArgs()[0].IndirectType = CLI.RetTy;
-    CLI.RetTy = Type::getVoidTy(Context);
+    CLI.RetTy = CLI.OrigRetTy = Type::getVoidTy(Context);
 
     // sret demotion isn't compatible with tail-calls, since the sret argument
     // points into the callers stack frame.
@@ -11161,6 +11167,12 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
     for (unsigned Value = 0, NumValues = ArgTys.size(); Value != NumValues;
          ++Value) {
       Type *ArgTy = ArgTys[Value];
+      Type *OrigArgTy = ArgTy;
+      if (Args[i].Ty != Args[i].OrigTy) {
+        assert(Value == 0 && "Only supported for non-aggregate arguments");
+        OrigArgTy = Args[i].OrigTy;
+      }
+
       EVT VT = getValueType(DL, ArgTy);
       SDValue Op = SDValue(Args[i].Node.getNode(),
                            Args[i].Node.getResNo() + Value);
@@ -11294,7 +11306,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
         // For scalable vectors the scalable part is currently handled
         // by individual targets, so we just use the known minimum size here.
         ISD::OutputArg MyFlags(
-            Flags, Parts[j].getValueType().getSimpleVT(), VT, ArgTy, i,
+            Flags, Parts[j].getValueType().getSimpleVT(), VT, OrigArgTy, i,
             j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
         if (NumParts > 1 && j == 0)
           MyFlags.Flags.setSplit();
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 911bbabc42aa3..1377aef860754 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -167,9 +167,16 @@ TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
   for (unsigned i = 0; i < Ops.size(); ++i) {
     SDValue NewOp = Ops[i];
     Entry.Node = NewOp;
-    Entry.Ty = i < OpsTypeOverrides.size() && OpsTypeOverrides[i]
-                   ? OpsTypeOverrides[i]
-                   : Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
+    if (i < OpsTypeOverrides.size() && OpsTypeOverrides[i])
+      Entry.Ty = Entry.OrigTy = OpsTypeOverrides[i];
+    else {
+      Entry.Ty = Entry.OrigTy =
+          Entry.Node.getValueType().getTypeForEVT(*DAG.getContext());
+      if (CallOptions.IsSoften)
+        Entry.OrigTy =
+            CallOptions.OpsVTBeforeSoften[i].getTypeForEVT(*DAG.getContext());
+    }
+
     Entry.IsSExt =
         shouldSignExtendTypeInLibCall(Entry.Ty, CallOptions.IsSigned);
     Entry.IsZExt = !Entry.IsSExt;
@@ -189,18 +196,21 @@ TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
       DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout()));
 
   Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
+  Type *OrigRetTy = RetTy;
   TargetLowering::CallLoweringInfo CLI(DAG);
   bool signExtend = shouldSignExtendTypeInLibCall(RetTy, CallOptions.IsSigned);
   bool zeroExtend = !signExtend;
 
-  if (CallOptions.IsSoften &&
-      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
-    signExtend = zeroExtend = false;
+  if (CallOptions.IsSoften) {
+    OrigRetTy = CallOptions.RetVTBeforeSoften.getTypeForEVT(*DAG.getContext());
+    if (!shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften))
+      signExtend = zeroExtend = false;
   }
 
   CLI.setDebugLoc(dl)
       .setChain(InChain)
-      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
+      .setLibCallee(getLibcallCallingConv(LC), RetTy, OrigRetTy, Callee,
+                    std::move(Args))
       .setNoReturn(CallOptions.DoesNotReturn)
       .setDiscardResult(!CallOptions.IsReturnValueUsed)
       .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)
diff --git a/llvm/lib/Target/Mips/MipsCCState.cpp b/llvm/lib/Target/Mips/MipsCCState.cpp
index d600343860b0b..d7b5633d7077e 100644
--- a/llvm/lib/Target/Mips/MipsCCState.cpp
+++ b/llvm/lib/Target/Mips/MipsCCState.cpp
@@ -12,31 +12,9 @@
 
 using namespace llvm;
 
-bool MipsCCState::isF128SoftLibCall(const char *CallSym) {
-  const char *const LibCalls[] = {
-      "__addtf3",      "__divtf3",     "__eqtf2",       "__extenddftf2",
-      "__extendsftf2", "__fixtfdi",    "__fixtfsi",     "__fixtfti",
-      "__fixunstfdi",  "__fixunstfsi", "__fixunstfti",  "__floatditf",
-      "__floatsitf",   "__floattitf",  "__floatunditf", "__floatunsitf",
-      "__floatuntitf", "__getf2",      "__gttf2",       "__letf2",
-      "__lttf2",       "__multf3",     "__netf2",       "__powitf2",
-      "__subtf3",      "__trunctfdf2", "__trunctfsf2",  "__unordtf2",
-      "ceill",         "copysignl",    "cosl",          "exp2l",
-      "expl",          "floorl",       "fmal",          "fmaxl",
-      "fmodl",         "frexpl",       "log10l",        "log2l",
-      "logl",          "nearbyintl",   "powl",          "rintl",
-      "roundl",        "sincosl",      "sinl",          "sqrtl",
-      "truncl"};
-
-  // Check that LibCalls is sorted alphabetically.
-  auto Comp = [](const char *S1, const char *S2) { return strcmp(S1, S2) < 0; };
-  assert(llvm::is_sorted(LibCalls, Comp));
-  return llvm::binary_search(LibCalls, CallSym, Comp);
-}
-
 /// This function returns true if Ty is fp128, {f128} or i128 which was
 /// originally a fp128.
-bool MipsCCState::originalTypeIsF128(const Type *Ty, const char *Func) {
+bool MipsCCState::originalTypeIsF128(const Type *Ty) {
   if (Ty->isFP128Ty())
     return true;
 
@@ -44,10 +22,7 @@ bool MipsCCState::originalTypeIsF128(const Type *Ty, const char *Func) {
       Ty->getStructElementType(0)->isFP128Ty())
     return true;
 
-  // If the Ty is i128 and the function being called is a long double emulation
-  // routine, then the original type is f128.
-  // FIXME: This is unsound because these functions could be indirectly called
-  return (Func && Ty->isIntegerTy(128) && isF128SoftLibCall(Func));
+  return false;
 }
 
 /// Return true if the original type was vXfXX.
@@ -84,11 +59,9 @@ MipsCCState::getSpecialCallingConvForCallee(const SDNode *Callee,
 }
 
 void MipsCCState::PreAnalyzeCallResultForF128(
-    const SmallVectorImpl<ISD::InputArg> &Ins,
-    const Type *RetTy, const char *Call) {
+    const SmallVectorImpl<ISD::InputArg> &Ins, const Type *RetTy) {
   for (unsigned i = 0; i < Ins.size(); ++i) {
-    OriginalArgWasF128.push_back(
-        originalTypeIsF128(RetTy, Call));
+    OriginalArgWasF128.push_back(originalTypeIsF128(RetTy));
     OriginalArgWasFloat.push_back(RetTy->isFloatingPointTy());
   }
 }
@@ -98,8 +71,7 @@ void MipsCCState::PreAnalyzeCallResultForF128(
 void MipsCCState::PreAnalyzeCallReturnForF128(
     const SmallVectorImpl<ISD::OutputArg> &Outs, const Type *RetTy) {
   for (unsigned i = 0; i < Outs.size(); ++i) {
-    OriginalArgWasF128.push_back(
-        originalTypeIsF128(RetTy, nullptr));
+    OriginalArgWasF128.push_back(originalTypeIsF128(RetTy));
     OriginalArgWasFloat.push_back(
         RetTy->isFloatingPointTy());
   }
@@ -129,8 +101,8 @@ void MipsCCState::PreAnalyzeReturnValue(EVT ArgVT) {
   OriginalRetWasFloatVector.push_back(originalEVTTypeIsVectorFloat(ArgVT));
 }
 
-void MipsCCState::PreAnalyzeCallOperand(const Type *ArgTy, const char *Func) {
-  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy, Func));
+void MipsCCState::PreAnalyzeCallOperand(const Type *ArgTy) {
+  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy));
   OriginalArgWasFloat.push_back(ArgTy->isFloatingPointTy());
   OriginalArgWasFloatVector.push_back(ArgTy->isVectorTy());
 }
@@ -139,14 +111,13 @@ void MipsCCState::PreAnalyzeCallOperand(const Type *ArgTy, const char *Func) {
 /// arguments and record this.
 void MipsCCState::PreAnalyzeCallOperands(
     const SmallVectorImpl<ISD::OutputArg> &Outs,
-    std::vector<TargetLowering::ArgListEntry> &FuncArgs,
-    const char *Func) {
+    std::vector<TargetLowering::ArgListEntry> &FuncArgs) {
   for (unsigned i = 0; i < Outs.size(); ++i) {
     TargetLowering::ArgListEntry FuncArg = FuncArgs[Outs[i].OrigArgIndex];
 
-    OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg.Ty, Func));
-    OriginalArgWasFloat.push_back(FuncArg.Ty->isFloatingPointTy());
-    OriginalArgWasFloatVector.push_back(FuncArg.Ty->isVectorTy());
+    OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg.OrigTy));
+    OriginalArgWasFloat.push_back(FuncArg.OrigTy->isFloatingPointTy());
+    OriginalArgWasFloatVector.push_back(FuncArg.OrigTy->isVectorTy());
   }
 }
 
@@ -162,7 +133,7 @@ void MipsCCState::PreAnalyzeFormalArgument(const Type *ArgTy,
     return;
   }
 
-  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy, nullptr));
+  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy));
   OriginalArgWasFloat.push_back(ArgTy->isFloatingPointTy());
 
   // The MIPS vector ABI exhibits a corner case of sorts or quirk; if the
@@ -192,8 +163,7 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128(
     assert(Ins[i].getOrigArgIndex() < MF.getFunction().arg_size());
     std::advance(FuncArg, Ins[i].getOrigArgIndex());
 
-    OriginalArgWasF128.push_back(
-        originalTypeIsF128(FuncArg->getType(), nullptr));
+    OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg->getType()));
     OriginalArgWasFloat.push_back(FuncArg->getType()->isFloatingPointTy());
 
     // The MIPS vector ABI exhibits a corner case of sorts or quirk; if the
diff --git a/llvm/lib/Target/Mips/MipsCCState.h b/llvm/lib/Target/Mips/MipsCCState.h
index 30b68e8a9c969..4d985518ce7c5 100644
--- a/llvm/lib/Target/Mips/MipsCCState.h
+++ b/llvm/lib/Target/Mips/MipsCCState.h
@@ -26,17 +26,11 @@ class MipsCCState : public CCState {
   getSpecialCallingConvForCallee(const SDNode *Callee,
                                  const MipsSubtarget &Subtarget);
 
-  /// This function returns true if CallSym is a long double emulation routine.
-  ///
-  /// FIXME: Changing the ABI based on the callee name is unsound. The lib func
-  /// address could be captured.
-  static bool isF128SoftLibCall(const char *CallSym);
-
-  static bool originalTypeIsF128(const Type *Ty, const char *Func);
+  static bool originalTypeIsF128(const Type *Ty);
   static bool originalEVTTypeIsVectorFloat(EVT Ty);
   static bool originalTypeIsVectorFloat(const Type *Ty);
 
-  void PreAnalyzeCallOperand(const Type *ArgTy, const char *Func);
+  void PreAnalyzeCallOperand(const Type *ArgTy);
 
   void PreAnalyzeFormalArgument(const Type *ArgTy, ISD::ArgFlagsTy Flags);
   void PreAnalyzeReturnValue(EVT ArgVT);
@@ -45,7 +39,7 @@ class MipsCCState : public CCState {
   /// Identify lowered values that originated from f128 arguments and record
   /// this for use by RetCC_MipsN.
   void PreAnalyzeCallResultForF128(const SmallVectorImpl<ISD::InputArg> &Ins,
-                                   const Type *RetTy, const char * Func);
+                                   const Type *RetTy);
 
   /// Identify lowered values that originated from f128 arguments and record
   /// this for use by RetCC_MipsN.
@@ -55,8 +49,7 @@ class MipsCCState : public CCState {
   /// this.
   void
   PreAnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
-                         std::vector<TargetLowering::ArgListEntry> &FuncArgs,
-                         const char *Func);
+                         std::vector<TargetLowering::ArgListEntry> &FuncArgs);
 
   /// Identify lowered values that originated from f128 arguments and record
   /// this for use by RetCC_MipsN.
@@ -96,21 +89,21 @@ class MipsCCState : public CCState {
               SpecialCallingConvType SpecialCC = NoSpecialCallingConv)
       : CCState(CC, isVarArg, MF, locs, C), SpecialCallingConv(SpecialCC) {}
 
-  void PreAnalyzeCallOperands(
-      const SmallVectorImpl<ISD::OutputArg> &Outs, CCAssignFn Fn,
-      std::vector<TargetLowering::ArgListEntry> &FuncArgs, const char *Func) {
+  void
+  PreAnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
+                         CCAssignFn Fn,
+                         std::vector<TargetLowering::ArgListEntry> &FuncArgs) {
     OriginalArgWasF128.clear();
     OriginalArgWasFloat.clear();
     OriginalArgWasFloatVector.clear();
-    PreAnalyzeCallOperands(Outs, FuncArgs, Func);
+    PreAnalyzeCallOperands(Outs, FuncArgs);
   }
 
   void
   AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                       CCAssignFn Fn,
-                      std::vector<TargetLowering::ArgListEntry> &FuncArgs,
-                      const char *Func) {
-    PreAnalyzeCallOperands(Outs, Fn, FuncArgs, Func);
+                      std::vector<TargetLowering::ArgListEntry> &FuncArgs) {
+    PreAnalyzeCallOperands(Outs, Fn, FuncArgs);
     CCState::AnalyzeCallOperands(Outs, Fn);
   }
 
@@ -137,26 +130,24 @@ class MipsCCState : public CCState {
     CCState::AnalyzeFormalArguments(Ins, Fn);
   }
 
-  void PreAnalyzeCallResult(const Type *RetTy, const char *Func) {
-    OriginalArgWasF128.push_back(originalTypeIsF128(RetTy, Func));
+  void PreAnalyzeCallResult(const Type *RetTy) {
+    OriginalArgWasF128.push_back(originalTypeIsF128(RetTy));
     OriginalArgWasFloat.push_back(RetTy->isFloatingPointTy());
     OriginalRetWasFloatVector.push_back(originalTypeIsVectorFloat(RetTy));
   }
 
   void PreAnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
-                            CCAssignFn Fn, const Type *RetTy,
-                            const char *Func) {
+                            CCAssignFn Fn, const Type *RetTy) {
     OriginalArgWasFloat.clear();
     OriginalArgWasF128.clear();
     OriginalArgWasFloatVector.clear();
-    PreAnalyzeCallResultForF128(Ins, RetTy, Func);
+    PreAnalyzeCallResultForF128(Ins, RetTy);
     PreAnalyzeCallResultForVectorFloat(Ins, RetTy);
   }
 
   void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
-                         CCAssignFn Fn, const Type *RetTy,
-                         const char *Func) {
-    PreAnalyzeCallResult(Ins, Fn, RetTy, Func);
+                         CCAssignFn Fn, const Type *RetTy) {
+    PreAnalyzeCallResult(Ins, Fn, RetTy);
     CCState::AnalyzeCallResult(Ins, Fn);
   }
 
diff --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index fa491086b0ac9..5b67346209731 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -27,16 +27,11 @@ MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
 
 namespace {
 struct MipsOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
-  /// This is the name of the function being called
-  /// FIXME: Relying on this is unsound
-  const char *Func = nullptr;
-
   /// Is this a return value, or an outgoing call operand.
   bool IsReturn;
 
-  MipsOutgoingValueAssigner(CCAssignFn *AssignFn_, const char *Func,
-                            bool IsReturn)
-      : OutgoingValueAssigner(AssignFn_), Func(Func), IsReturn(IsReturn) {}
+  MipsOutgoingValueAssigner(CCAssignFn *AssignFn_, bool IsReturn)
+      : OutgoingValueAssigner(AssignFn_), IsReturn(IsReturn) {}
 
   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                  CCValAssign::LocInfo LocInfo,
@@ -47,7 +42,7 @@ struct MipsOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
     if (IsReturn)
       State.PreAnalyzeReturnValue(EVT::getEVT(Info.Ty));
     else
-      State.PreAnalyzeCallOperand(Info.Ty, Func);
+      State.PreAnalyzeCallOperand(Info.Ty);
 
     return CallLowering::OutgoingValueAssigner::assignArg(
         ValNo, OrigVT, ValVT, LocVT, LocInfo, Info, Flags, State);
@@ -55,16 +50,11 @@ struct MipsOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
 };
 
 struct MipsIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
-  /// This is the name of the function being called
-  /// FIXME: Relying on this is unsound
-  const char *Func = nullptr;
-
   /// Is this a call return value, or an incoming function argument.
   bool IsReturn;
 
-  MipsIncomingValueAssigner(CCAssignFn *AssignFn_, const char *Func,
-                            bool IsReturn)
-      : IncomingValueAssigner(AssignFn_), Func(Func), IsReturn(IsReturn) {}
+  MipsIncomingValueAssigner(CCAssignFn *AssignFn_, bool IsReturn)
+      : IncomingValueAssigner(AssignFn_), IsReturn(IsReturn) {}
 
   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                  CCValAssign::LocInfo LocInfo,
@@ -73,7 +63,7 @@ struct MipsIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
     MipsCCState &State = static_cast<MipsCCState &>(State_);
 
     if (IsReturn)
-      State.PreAnalyzeCallResult(Info.Ty, Func);
+      State.PreAnalyzeCallResult(Info.Ty);
     else
       State.PreAnalyzeFormalArgument(Info.Ty, Flags);
 
@@ -339,9 +329,8 @@ bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                        F.getContext());
 
     MipsOutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
-    std::string FuncName = F.getName().str();
     MipsOutgoingValueAssigner Assigner(TLI.CCAssignFnForReturn(),
-                                       FuncName.c_str(), /*IsReturn*/ true);
+                                       /*IsReturn*/ true);
 
     if (!determineAssignments(Assigner, RetInfos, CCInfo))
       return false;
@@ -392,8 +381,7 @@ bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                        Align(1));
 
-  const std::string FuncName = F.getName().str();
-  MipsIncomingValueAssigner Assigner(TLI.CCAssignFnForCall(), FuncName.c_str(),
+  MipsIncomingValueAssigner Assigner(TLI.CCAssignFnForCall(),
                                      /*IsReturn*/ false);
   if (!determineAssignments(Assigner, ArgInfos, CCInfo))
     return false;
@@ -510,10 +498,7 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv),
                        Align(1));
 
-  const char *Call =
-      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
-
-  MipsOutgoingValueAssigner Assigner(TLI.CCAssignFnForCall(), Call,
+  MipsOutgoingValueAssigner Assigner(TLI.CCAssignFnForCall(),
                                      /*IsReturn*/ false);
   if (!determineAssignments(Assigner, ArgInfos, CCInfo))
     return false;
@@ -550,10 +535,8 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
     CallLowering::splitToValueTypes(Info.OrigRet, ArgInfos, DL,
                                     F.getCallingConv());
 
-    const std::string FuncName = F.getName().str();
     SmallVector<CCValAssign, 8> ArgLocs;
     MipsIncomingValueAssigner Assigner(TLI.CCAssignFnForReturn(),
-                                       FuncName.c_str(),
                                        /*IsReturn*/ true);
     CallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
 
diff --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index a9ac0eae5dace..94fb3cc356819 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -1293,9 +1293,7 @@ bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
     SmallVector<CCValAssign, 16> RVLocs;
     MipsCCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
 
-    CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips, CLI.RetTy,
-                             CLI.Symbol ? CLI.Symbol->getName().data()
-                                        : nullptr);
+    CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips, CLI.RetTy);
 
     // Only handle a single return value.
     if (RVLocs.size() != 1)
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 9fbbdb2ecb264..935d60b23ac35 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3394,8 +3394,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
   CCInfo.AllocateStack(ReservedArgArea, Align(1));
 
-  CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
-                             ES ? ES->getSymbol() : nullptr);
+  CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs());
 
   // Get a count of how many bytes are to be pushed on the stack.
   unsigned StackSize = CCInfo.getStackSize();
@@ -3690,10 +3689,7 @@ SDValue MipsTargetLowering::LowerCallResult(
   MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                      *DAG.getContext());
 
-  const ExternalSymbolSDNode *ES =
-      dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
-  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
-                           ES ? ES->getSymbol() : nullptr);
+  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.OrigRetTy);
 
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {
diff --git a/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll b/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll
index bbfb7cf9ca907..409b1a1f818ab 100644
--- a/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll
+++ b/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll
@@ -49,13 +49,11 @@ define float @fmuladd_intrinsic_f32(float %a, float %b, float %c) #0 {
 ; SOFT-FLOAT-64-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 31, -8
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 16, -16
-; SOFT-FLOAT-64-NEXT:    move $16, $6
-; SOFT-FLOAT-64-NEXT:    sll $4, $4, 0
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $5, 0
-; SOFT-FLOAT-64-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64-NEXT:    move $16, $6
+; SOFT-FLOAT-64-NEXT:    move $4, $2
 ; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64-NEXT:    move $5, $16
 ; SOFT-FLOAT-64-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    jr $ra
@@ -69,13 +67,11 @@ define float @fmuladd_intrinsic_f32(float %a, float %b, float %c) #0 {
 ; SOFT-FLOAT-64R2-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 31, -8
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 16, -16
-; SOFT-FLOAT-64R2-NEXT:    move $16, $6
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $4, 0
 ; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $5, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64R2-NEXT:    move $16, $6
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64R2-NEXT:    move $5, $16
 ; SOFT-FLOAT-64R2-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    jr $ra
@@ -203,13 +199,11 @@ define float @fmuladd_contract_f32(float %a, float %b, float %c) #0 {
 ; SOFT-FLOAT-64-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 31, -8
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 16, -16
-; SOFT-FLOAT-64-NEXT:    move $16, $6
-; SOFT-FLOAT-64-NEXT:    sll $4, $4, 0
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $5, 0
-; SOFT-FLOAT-64-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64-NEXT:    move $16, $6
+; SOFT-FLOAT-64-NEXT:    move $4, $2
 ; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64-NEXT:    move $5, $16
 ; SOFT-FLOAT-64-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    jr $ra
@@ -223,13 +217,11 @@ define float @fmuladd_contract_f32(float %a, float %b, float %c) #0 {
 ; SOFT-FLOAT-64R2-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 31, -8
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 16, -16
-; SOFT-FLOAT-64R2-NEXT:    move $16, $6
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $4, 0
 ; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $5, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64R2-NEXT:    move $16, $6
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64R2-NEXT:    move $5, $16
 ; SOFT-FLOAT-64R2-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    jr $ra
@@ -443,149 +435,169 @@ define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x
 ;
 ; SOFT-FLOAT-64-LABEL: fmuladd_contract_v4f32:
 ; SOFT-FLOAT-64:       # %bb.0:
-; SOFT-FLOAT-64-NEXT:    daddiu $sp, $sp, -64
-; SOFT-FLOAT-64-NEXT:    .cfi_def_cfa_offset 64
-; SOFT-FLOAT-64-NEXT:    sd $ra, 56($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $22, 48($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $21, 40($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $20, 32($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $19, 24($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $18, 16($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $17, 8($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    daddiu $sp, $sp, -80
+; SOFT-FLOAT-64-NEXT:    .cfi_def_cfa_offset 80
+; SOFT-FLOAT-64-NEXT:    sd $ra, 72($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $23, 64($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $22, 56($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $21, 48($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $20, 40($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $19, 32($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $18, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $17, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $16, 8($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 31, -8
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 22, -16
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 21, -24
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 20, -32
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 19, -40
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 18, -48
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 17, -56
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 16, -64
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 23, -16
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 22, -24
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 21, -32
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 20, -40
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 19, -48
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 18, -56
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 17, -64
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 16, -72
 ; SOFT-FLOAT-64-NEXT:    move $16, $9
-; SOFT-FLOAT-64-NEXT:    move $17, $8
-; SOFT-FLOAT-64-NEXT:    move $18, $7
-; SOFT-FLOAT-64-NEXT:    move $19, $6
-; SOFT-FLOAT-64-NEXT:    move $20, $5
+; SOFT-FLOAT-64-NEXT:    move $19, $8
+; SOFT-FLOAT-64-NEXT:    move $17, $7
+; SOFT-FLOAT-64-NEXT:    move $20, $6
+; SOFT-FLOAT-64-NEXT:    move $18, $5
 ; SOFT-FLOAT-64-NEXT:    move $21, $4
-; SOFT-FLOAT-64-NEXT:    sll $4, $4, 0
+; SOFT-FLOAT-64-NEXT:    sll $4, $21, 0
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $6, 0
+; SOFT-FLOAT-64-NEXT:    sll $5, $20, 0
+; SOFT-FLOAT-64-NEXT:    move $4, $2
+; SOFT-FLOAT-64-NEXT:    jal __addsf3
+; SOFT-FLOAT-64-NEXT:    sll $5, $19, 0
 ; SOFT-FLOAT-64-NEXT:    move $22, $2
-; SOFT-FLOAT-64-NEXT:    dsra $4, $21, 32
+; SOFT-FLOAT-64-NEXT:    sll $4, $18, 0
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    dsra $5, $19, 32
-; SOFT-FLOAT-64-NEXT:    sll $4, $2, 0
-; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    dsra $5, $17, 32
-; SOFT-FLOAT-64-NEXT:    # kill: def $v0 killed $v0 def $v0_64
-; SOFT-FLOAT-64-NEXT:    sll $4, $22, 0
 ; SOFT-FLOAT-64-NEXT:    sll $5, $17, 0
+; SOFT-FLOAT-64-NEXT:    move $23, $2
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $21, 32
+; SOFT-FLOAT-64-NEXT:    sll $4, $1, 0
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $20, 32
+; SOFT-FLOAT-64-NEXT:    jal __mulsf3
+; SOFT-FLOAT-64-NEXT:    sll $5, $1, 0
+; SOFT-FLOAT-64-NEXT:    move $4, $2
+; SOFT-FLOAT-64-NEXT:    dsll $1, $22, 32
+; SOFT-FLOAT-64-NEXT:    dsrl $2, $19, 32
+; SOFT-FLOAT-64-NEXT:    sll $5, $2, 0
 ; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    dsll $17, $2, 32
+; SOFT-FLOAT-64-NEXT:    dsrl $19, $1, 32
+; SOFT-FLOAT-64-NEXT:    # kill: def $v0 killed $v0 def $v0_64
 ; SOFT-FLOAT-64-NEXT:    dsll $1, $2, 32
-; SOFT-FLOAT-64-NEXT:    dsrl $1, $1, 32
-; SOFT-FLOAT-64-NEXT:    sll $4, $20, 0
-; SOFT-FLOAT-64-NEXT:    sll $5, $18, 0
-; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    or $17, $1, $17
-; SOFT-FLOAT-64-NEXT:    move $19, $2
-; SOFT-FLOAT-64-NEXT:    dsra $4, $20, 32
+; SOFT-FLOAT-64-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64-NEXT:    or $19, $19, $1
+; SOFT-FLOAT-64-NEXT:    jal __addsf3
+; SOFT-FLOAT-64-NEXT:    move $4, $23
+; SOFT-FLOAT-64-NEXT:    move $20, $2
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $18, 32
+; SOFT-FLOAT-64-NEXT:    sll $4, $1, 0
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $17, 32
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    dsra $5, $18, 32
-; SOFT-FLOAT-64-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64-NEXT:    sll $5, $1, 0
+; SOFT-FLOAT-64-NEXT:    move $4, $2
+; SOFT-FLOAT-64-NEXT:    dsll $1, $20, 32
+; SOFT-FLOAT-64-NEXT:    dsrl $17, $1, 32
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $16, 32
 ; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    dsra $5, $16, 32
+; SOFT-FLOAT-64-NEXT:    sll $5, $1, 0
 ; SOFT-FLOAT-64-NEXT:    # kill: def $v0 killed $v0 def $v0_64
-; SOFT-FLOAT-64-NEXT:    dsll $18, $2, 32
-; SOFT-FLOAT-64-NEXT:    sll $4, $19, 0
-; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $16, 0
 ; SOFT-FLOAT-64-NEXT:    dsll $1, $2, 32
-; SOFT-FLOAT-64-NEXT:    dsrl $1, $1, 32
-; SOFT-FLOAT-64-NEXT:    or $3, $1, $18
-; SOFT-FLOAT-64-NEXT:    move $2, $17
-; SOFT-FLOAT-64-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $17, 8($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $18, 16($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $19, 24($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $20, 32($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $21, 40($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $22, 48($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $ra, 56($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    or $3, $17, $1
+; SOFT-FLOAT-64-NEXT:    move $2, $19
+; SOFT-FLOAT-64-NEXT:    ld $16, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $17, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $18, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $19, 32($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $20, 40($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $21, 48($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $22, 56($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $23, 64($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $ra, 72($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    jr $ra
-; SOFT-FLOAT-64-NEXT:    daddiu $sp, $sp, 64
+; SOFT-FLOAT-64-NEXT:    daddiu $sp, $sp, 80
 ;
 ; SOFT-FLOAT-64R2-LABEL: fmuladd_contract_v4f32:
 ; SOFT-FLOAT-64R2:       # %bb.0:
-; SOFT-FLOAT-64R2-NEXT:    daddiu $sp, $sp, -64
-; SOFT-FLOAT-64R2-NEXT:    .cfi_def_cfa_offset 64
-; SOFT-FLOAT-64R2-NEXT:    sd $ra, 56($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $22, 48($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $21, 40($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $20, 32($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $19, 24($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $18, 16($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $17, 8($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    daddiu $sp, $sp, -80
+; SOFT-FLOAT-64R2-NEXT:    .cfi_def_cfa_offset 80
+; SOFT-FLOAT-64R2-NEXT:    sd $ra, 72($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $23, 64($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $22, 56($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $21, 48($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $20, 40($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $19, 32($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $18, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $17, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $16, 8($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 31, -8
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 22, -16
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 21, -24
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 20, -32
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 19, -40
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 18, -48
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 17, -56
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 16, -64
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 23, -16
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 22, -24
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 21, -32
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 20, -40
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 19, -48
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 18, -56
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 17, -64
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 16, -72
 ; SOFT-FLOAT-64R2-NEXT:    move $16, $9
-; SOFT-FLOAT-64R2-NEXT:    move $17, $8
-; SOFT-FLOAT-64R2-NEXT:    move $18, $7
-; SOFT-FLOAT-64R2-NEXT:    move $19, $6
-; SOFT-FLOAT-64R2-NEXT:    move $20, $5
+; SOFT-FLOAT-64R2-NEXT:    move $19, $8
+; SOFT-FLOAT-64R2-NEXT:    move $17, $7
+; SOFT-FLOAT-64R2-NEXT:    move $20, $6
+; SOFT-FLOAT-64R2-NEXT:    move $18, $5
 ; SOFT-FLOAT-64R2-NEXT:    move $21, $4
-; SOFT-FLOAT-64R2-NEXT:    dsra $4, $4, 32
-; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    dsra $5, $6, 32
-; SOFT-FLOAT-64R2-NEXT:    move $22, $2
 ; SOFT-FLOAT-64R2-NEXT:    sll $4, $21, 0
 ; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $19, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $20, 0
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $19, 0
+; SOFT-FLOAT-64R2-NEXT:    move $22, $2
+; SOFT-FLOAT-64R2-NEXT:    sll $4, $18, 0
+; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
 ; SOFT-FLOAT-64R2-NEXT:    sll $5, $17, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $22, 0
-; SOFT-FLOAT-64R2-NEXT:    dsra $5, $17, 32
+; SOFT-FLOAT-64R2-NEXT:    move $23, $2
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $21, 32
+; SOFT-FLOAT-64R2-NEXT:    sll $4, $1, 0
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $20, 32
+; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $1, 0
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $19, 32
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $1, 0
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
-; SOFT-FLOAT-64R2-NEXT:    dext $17, $2, 0, 32
+; SOFT-FLOAT-64R2-NEXT:    dext $19, $22, 0, 32
 ; SOFT-FLOAT-64R2-NEXT:    # kill: def $v0 killed $v0 def $v0_64
 ; SOFT-FLOAT-64R2-NEXT:    dsll $1, $2, 32
-; SOFT-FLOAT-64R2-NEXT:    dsra $4, $20, 32
-; SOFT-FLOAT-64R2-NEXT:    dsra $5, $18, 32
-; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    or $17, $17, $1
-; SOFT-FLOAT-64R2-NEXT:    move $19, $2
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $20, 0
-; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $18, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $2, 0
-; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
 ; SOFT-FLOAT-64R2-NEXT:    sll $5, $16, 0
-; SOFT-FLOAT-64R2-NEXT:    dext $18, $2, 0, 32
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $19, 0
+; SOFT-FLOAT-64R2-NEXT:    or $19, $19, $1
+; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
+; SOFT-FLOAT-64R2-NEXT:    move $4, $23
+; SOFT-FLOAT-64R2-NEXT:    move $20, $2
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $18, 32
+; SOFT-FLOAT-64R2-NEXT:    sll $4, $1, 0
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $17, 32
+; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $1, 0
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
+; SOFT-FLOAT-64R2-NEXT:    dext $17, $20, 0, 32
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $16, 32
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
-; SOFT-FLOAT-64R2-NEXT:    dsra $5, $16, 32
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $1, 0
 ; SOFT-FLOAT-64R2-NEXT:    # kill: def $v0 killed $v0 def $v0_64
 ; SOFT-FLOAT-64R2-NEXT:    dsll $1, $2, 32
-; SOFT-FLOAT-64R2-NEXT:    or $3, $18, $1
-; SOFT-FLOAT-64R2-NEXT:    move $2, $17
-; SOFT-FLOAT-64R2-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $17, 8($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $18, 16($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $19, 24($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $20, 32($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $21, 40($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $22, 48($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $ra, 56($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    or $3, $17, $1
+; SOFT-FLOAT-64R2-NEXT:    move $2, $19
+; SOFT-FLOAT-64R2-NEXT:    ld $16, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $17, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $18, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $19, 32($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $20, 40($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $21, 48($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $22, 56($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $23, 64($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $ra, 72($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    jr $ra
-; SOFT-FLOAT-64R2-NEXT:    daddiu $sp, $sp, 64
+; SOFT-FLOAT-64R2-NEXT:    daddiu $sp, $sp, 80
   %product = fmul contract <4 x float> %a, %b
   %result = fadd contract <4 x float> %product, %c
   ret <4 x float> %result



More information about the llvm-commits mailing list