[llvm] 238c3dc - [CodeGen][Mips] Remove fp128 libcall list (#153798)

via llvm-commits llvm-commits at lists.llvm.org
Mon Aug 18 00:22:45 PDT 2025


Author: Nikita Popov
Date: 2025-08-18T09:22:41+02:00
New Revision: 238c3dcd0dcc475e0695541351b8f4ad67c465b4

URL: https://github.com/llvm/llvm-project/commit/238c3dcd0dcc475e0695541351b8f4ad67c465b4
DIFF: https://github.com/llvm/llvm-project/commit/238c3dcd0dcc475e0695541351b8f4ad67c465b4.diff

LOG: [CodeGen][Mips] Remove fp128 libcall list (#153798)

Mips requires fp128 args/returns to be passed differently than i128. It
handles this by inspecting the pre-legalization type. However, for soft
float libcalls, the original type is currently not provided (it will
look like an i128 call). To work around that, MIPS maintains a list of
libcalls working on fp128.

This patch removes that list by providing the original, pre-softening
type to calling convention lowering. This is done by carrying additional
information in CallLoweringInfo, as we unfortunately do need both types
(we want the un-softened type for OrigTy, but we need the softened type
for the actual register assignment etc.)

This is in preparation for completely removing all the custom
pre-analysis code in the Mips backend and replacing it with use of
OrigTy.

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/TargetLowering.h
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
    llvm/lib/Target/Mips/MipsCCState.cpp
    llvm/lib/Target/Mips/MipsCCState.h
    llvm/lib/Target/Mips/MipsCallLowering.cpp
    llvm/lib/Target/Mips/MipsFastISel.cpp
    llvm/lib/Target/Mips/MipsISelLowering.cpp
    llvm/test/CodeGen/Mips/fmuladd-soft-float.ll

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 272d7dd5f45e8..e9bb979e44973 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -301,6 +301,9 @@ class LLVM_ABI TargetLoweringBase {
   public:
     Value *Val;
     SDValue Node;
+    /// Original unlegalized argument type.
+    Type *OrigTy;
+    /// Same as OrigTy, or partially legalized for soft float libcalls.
     Type *Ty;
     bool IsSExt : 1;
     bool IsZExt : 1;
@@ -321,9 +324,9 @@ class LLVM_ABI TargetLoweringBase {
     Type *IndirectType = nullptr;
 
     ArgListEntry(Value *Val, SDValue Node, Type *Ty)
-        : Val(Val), Node(Node), Ty(Ty), IsSExt(false), IsZExt(false),
-          IsNoExt(false), IsInReg(false), IsSRet(false), IsNest(false),
-          IsByVal(false), IsByRef(false), IsInAlloca(false),
+        : Val(Val), Node(Node), OrigTy(Ty), Ty(Ty), IsSExt(false),
+          IsZExt(false), IsNoExt(false), IsInReg(false), IsSRet(false),
+          IsNest(false), IsByVal(false), IsByRef(false), IsInAlloca(false),
           IsPreallocated(false), IsReturned(false), IsSwiftSelf(false),
           IsSwiftAsync(false), IsSwiftError(false), IsCFGuardTarget(false) {}
 
@@ -4677,6 +4680,9 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
   /// implementation.
   struct CallLoweringInfo {
     SDValue Chain;
+    /// Original unlegalized return type.
+    Type *OrigRetTy = nullptr;
+    /// Same as OrigRetTy, or partially legalized for soft float libcalls.
     Type *RetTy = nullptr;
     bool RetSExt           : 1;
     bool RetZExt           : 1;
@@ -4731,6 +4737,14 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
     // setCallee with target/module-specific attributes
     CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
                                    SDValue Target, ArgListTy &&ArgsList) {
+      return setLibCallee(CC, ResultType, ResultType, Target,
+                          std::move(ArgsList));
+    }
+
+    CallLoweringInfo &setLibCallee(CallingConv::ID CC, Type *ResultType,
+                                   Type *OrigResultType, SDValue Target,
+                                   ArgListTy &&ArgsList) {
+      OrigRetTy = OrigResultType;
       RetTy = ResultType;
       Callee = Target;
       CallConv = CC;
@@ -4745,7 +4759,7 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
     CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
                                 SDValue Target, ArgListTy &&ArgsList,
                                 AttributeSet ResultAttrs = {}) {
-      RetTy = ResultType;
+      RetTy = OrigRetTy = ResultType;
       IsInReg = ResultAttrs.hasAttribute(Attribute::InReg);
       RetSExt = ResultAttrs.hasAttribute(Attribute::SExt);
       RetZExt = ResultAttrs.hasAttribute(Attribute::ZExt);
@@ -4761,7 +4775,7 @@ class LLVM_ABI TargetLowering : public TargetLoweringBase {
     CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
                                 SDValue Target, ArgListTy &&ArgsList,
                                 const CallBase &Call) {
-      RetTy = ResultType;
+      RetTy = OrigRetTy = ResultType;
 
       IsInReg = Call.hasRetAttr(Attribute::InReg);
       DoesNotReturn =

diff  --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 2a1ef2b980ac4..d2538e9832be2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -10998,11 +10998,17 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   SmallVector<Type *, 4> RetOrigTys;
   SmallVector<TypeSize, 4> Offsets;
   auto &DL = CLI.DAG.getDataLayout();
-  ComputeValueTypes(DL, CLI.RetTy, RetOrigTys, &Offsets);
+  ComputeValueTypes(DL, CLI.OrigRetTy, RetOrigTys, &Offsets);
 
   SmallVector<EVT, 4> RetVTs;
-  for (Type *Ty : RetOrigTys)
-    RetVTs.push_back(getValueType(DL, Ty));
+  if (CLI.RetTy != CLI.OrigRetTy) {
+    assert(RetOrigTys.size() == 1 &&
+           "Only supported for non-aggregate returns");
+    RetVTs.push_back(getValueType(DL, CLI.RetTy));
+  } else {
+    for (Type *Ty : RetOrigTys)
+      RetVTs.push_back(getValueType(DL, Ty));
+  }
 
   if (CLI.IsPostTypeLegalization) {
     // If we are lowering a libcall after legalization, split the return type.
@@ -11053,7 +11059,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
     CLI.getArgs().insert(CLI.getArgs().begin(), Entry);
     CLI.NumFixedArgs += 1;
     CLI.getArgs()[0].IndirectType = CLI.RetTy;
-    CLI.RetTy = Type::getVoidTy(Context);
+    CLI.RetTy = CLI.OrigRetTy = Type::getVoidTy(Context);
 
     // sret demotion isn't compatible with tail-calls, since the sret argument
     // points into the callers stack frame.
@@ -11110,17 +11116,23 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
   CLI.Outs.clear();
   CLI.OutVals.clear();
   for (unsigned i = 0, e = Args.size(); i != e; ++i) {
-    SmallVector<Type *, 4> ArgTys;
-    ComputeValueTypes(DL, Args[i].Ty, ArgTys);
+    SmallVector<Type *, 4> OrigArgTys;
+    ComputeValueTypes(DL, Args[i].OrigTy, OrigArgTys);
     // FIXME: Split arguments if CLI.IsPostTypeLegalization
     Type *FinalType = Args[i].Ty;
     if (Args[i].IsByVal)
       FinalType = Args[i].IndirectType;
     bool NeedsRegBlock = functionArgumentNeedsConsecutiveRegisters(
         FinalType, CLI.CallConv, CLI.IsVarArg, DL);
-    for (unsigned Value = 0, NumValues = ArgTys.size(); Value != NumValues;
+    for (unsigned Value = 0, NumValues = OrigArgTys.size(); Value != NumValues;
          ++Value) {
-      Type *ArgTy = ArgTys[Value];
+      Type *OrigArgTy = OrigArgTys[Value];
+      Type *ArgTy = OrigArgTy;
+      if (Args[i].Ty != Args[i].OrigTy) {
+        assert(Value == 0 && "Only supported for non-aggregate arguments");
+        ArgTy = Args[i].Ty;
+      }
+
       EVT VT = getValueType(DL, ArgTy);
       SDValue Op = SDValue(Args[i].Node.getNode(),
                            Args[i].Node.getResNo() + Value);
@@ -11254,7 +11266,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
         // For scalable vectors the scalable part is currently handled
         // by individual targets, so we just use the known minimum size here.
         ISD::OutputArg MyFlags(
-            Flags, Parts[j].getValueType().getSimpleVT(), VT, ArgTy, i,
+            Flags, Parts[j].getValueType().getSimpleVT(), VT, OrigArgTy, i,
             j * Parts[j].getValueType().getStoreSize().getKnownMinValue());
         if (NumParts > 1 && j == 0)
           MyFlags.Flags.setSplit();

diff  --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index ca10a6ecb456d..402a012e8e555 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -169,6 +169,10 @@ TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
                    ? OpsTypeOverrides[i]
                    : NewOp.getValueType().getTypeForEVT(*DAG.getContext());
     TargetLowering::ArgListEntry Entry(NewOp, Ty);
+    if (CallOptions.IsSoften)
+      Entry.OrigTy =
+          CallOptions.OpsVTBeforeSoften[i].getTypeForEVT(*DAG.getContext());
+
     Entry.IsSExt =
         shouldSignExtendTypeInLibCall(Entry.Ty, CallOptions.IsSigned);
     Entry.IsZExt = !Entry.IsSExt;
@@ -188,18 +192,21 @@ TargetLowering::makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC, EVT RetVT,
       DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout()));
 
   Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
+  Type *OrigRetTy = RetTy;
   TargetLowering::CallLoweringInfo CLI(DAG);
   bool signExtend = shouldSignExtendTypeInLibCall(RetTy, CallOptions.IsSigned);
   bool zeroExtend = !signExtend;
 
-  if (CallOptions.IsSoften &&
-      !shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften)) {
-    signExtend = zeroExtend = false;
+  if (CallOptions.IsSoften) {
+    OrigRetTy = CallOptions.RetVTBeforeSoften.getTypeForEVT(*DAG.getContext());
+    if (!shouldExtendTypeInLibCall(CallOptions.RetVTBeforeSoften))
+      signExtend = zeroExtend = false;
   }
 
   CLI.setDebugLoc(dl)
       .setChain(InChain)
-      .setLibCallee(getLibcallCallingConv(LC), RetTy, Callee, std::move(Args))
+      .setLibCallee(getLibcallCallingConv(LC), RetTy, OrigRetTy, Callee,
+                    std::move(Args))
       .setNoReturn(CallOptions.DoesNotReturn)
       .setDiscardResult(!CallOptions.IsReturnValueUsed)
       .setIsPostTypeLegalization(CallOptions.IsPostTypeLegalization)

diff  --git a/llvm/lib/Target/Mips/MipsCCState.cpp b/llvm/lib/Target/Mips/MipsCCState.cpp
index d600343860b0b..d7b5633d7077e 100644
--- a/llvm/lib/Target/Mips/MipsCCState.cpp
+++ b/llvm/lib/Target/Mips/MipsCCState.cpp
@@ -12,31 +12,9 @@
 
 using namespace llvm;
 
-bool MipsCCState::isF128SoftLibCall(const char *CallSym) {
-  const char *const LibCalls[] = {
-      "__addtf3",      "__divtf3",     "__eqtf2",       "__extenddftf2",
-      "__extendsftf2", "__fixtfdi",    "__fixtfsi",     "__fixtfti",
-      "__fixunstfdi",  "__fixunstfsi", "__fixunstfti",  "__floatditf",
-      "__floatsitf",   "__floattitf",  "__floatunditf", "__floatunsitf",
-      "__floatuntitf", "__getf2",      "__gttf2",       "__letf2",
-      "__lttf2",       "__multf3",     "__netf2",       "__powitf2",
-      "__subtf3",      "__trunctfdf2", "__trunctfsf2",  "__unordtf2",
-      "ceill",         "copysignl",    "cosl",          "exp2l",
-      "expl",          "floorl",       "fmal",          "fmaxl",
-      "fmodl",         "frexpl",       "log10l",        "log2l",
-      "logl",          "nearbyintl",   "powl",          "rintl",
-      "roundl",        "sincosl",      "sinl",          "sqrtl",
-      "truncl"};
-
-  // Check that LibCalls is sorted alphabetically.
-  auto Comp = [](const char *S1, const char *S2) { return strcmp(S1, S2) < 0; };
-  assert(llvm::is_sorted(LibCalls, Comp));
-  return llvm::binary_search(LibCalls, CallSym, Comp);
-}
-
 /// This function returns true if Ty is fp128, {f128} or i128 which was
 /// originally a fp128.
-bool MipsCCState::originalTypeIsF128(const Type *Ty, const char *Func) {
+bool MipsCCState::originalTypeIsF128(const Type *Ty) {
   if (Ty->isFP128Ty())
     return true;
 
@@ -44,10 +22,7 @@ bool MipsCCState::originalTypeIsF128(const Type *Ty, const char *Func) {
       Ty->getStructElementType(0)->isFP128Ty())
     return true;
 
-  // If the Ty is i128 and the function being called is a long double emulation
-  // routine, then the original type is f128.
-  // FIXME: This is unsound because these functions could be indirectly called
-  return (Func && Ty->isIntegerTy(128) && isF128SoftLibCall(Func));
+  return false;
 }
 
 /// Return true if the original type was vXfXX.
@@ -84,11 +59,9 @@ MipsCCState::getSpecialCallingConvForCallee(const SDNode *Callee,
 }
 
 void MipsCCState::PreAnalyzeCallResultForF128(
-    const SmallVectorImpl<ISD::InputArg> &Ins,
-    const Type *RetTy, const char *Call) {
+    const SmallVectorImpl<ISD::InputArg> &Ins, const Type *RetTy) {
   for (unsigned i = 0; i < Ins.size(); ++i) {
-    OriginalArgWasF128.push_back(
-        originalTypeIsF128(RetTy, Call));
+    OriginalArgWasF128.push_back(originalTypeIsF128(RetTy));
     OriginalArgWasFloat.push_back(RetTy->isFloatingPointTy());
   }
 }
@@ -98,8 +71,7 @@ void MipsCCState::PreAnalyzeCallResultForF128(
 void MipsCCState::PreAnalyzeCallReturnForF128(
     const SmallVectorImpl<ISD::OutputArg> &Outs, const Type *RetTy) {
   for (unsigned i = 0; i < Outs.size(); ++i) {
-    OriginalArgWasF128.push_back(
-        originalTypeIsF128(RetTy, nullptr));
+    OriginalArgWasF128.push_back(originalTypeIsF128(RetTy));
     OriginalArgWasFloat.push_back(
         RetTy->isFloatingPointTy());
   }
@@ -129,8 +101,8 @@ void MipsCCState::PreAnalyzeReturnValue(EVT ArgVT) {
   OriginalRetWasFloatVector.push_back(originalEVTTypeIsVectorFloat(ArgVT));
 }
 
-void MipsCCState::PreAnalyzeCallOperand(const Type *ArgTy, const char *Func) {
-  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy, Func));
+void MipsCCState::PreAnalyzeCallOperand(const Type *ArgTy) {
+  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy));
   OriginalArgWasFloat.push_back(ArgTy->isFloatingPointTy());
   OriginalArgWasFloatVector.push_back(ArgTy->isVectorTy());
 }
@@ -139,14 +111,13 @@ void MipsCCState::PreAnalyzeCallOperand(const Type *ArgTy, const char *Func) {
 /// arguments and record this.
 void MipsCCState::PreAnalyzeCallOperands(
     const SmallVectorImpl<ISD::OutputArg> &Outs,
-    std::vector<TargetLowering::ArgListEntry> &FuncArgs,
-    const char *Func) {
+    std::vector<TargetLowering::ArgListEntry> &FuncArgs) {
   for (unsigned i = 0; i < Outs.size(); ++i) {
     TargetLowering::ArgListEntry FuncArg = FuncArgs[Outs[i].OrigArgIndex];
 
-    OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg.Ty, Func));
-    OriginalArgWasFloat.push_back(FuncArg.Ty->isFloatingPointTy());
-    OriginalArgWasFloatVector.push_back(FuncArg.Ty->isVectorTy());
+    OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg.OrigTy));
+    OriginalArgWasFloat.push_back(FuncArg.OrigTy->isFloatingPointTy());
+    OriginalArgWasFloatVector.push_back(FuncArg.OrigTy->isVectorTy());
   }
 }
 
@@ -162,7 +133,7 @@ void MipsCCState::PreAnalyzeFormalArgument(const Type *ArgTy,
     return;
   }
 
-  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy, nullptr));
+  OriginalArgWasF128.push_back(originalTypeIsF128(ArgTy));
   OriginalArgWasFloat.push_back(ArgTy->isFloatingPointTy());
 
   // The MIPS vector ABI exhibits a corner case of sorts or quirk; if the
@@ -192,8 +163,7 @@ void MipsCCState::PreAnalyzeFormalArgumentsForF128(
     assert(Ins[i].getOrigArgIndex() < MF.getFunction().arg_size());
     std::advance(FuncArg, Ins[i].getOrigArgIndex());
 
-    OriginalArgWasF128.push_back(
-        originalTypeIsF128(FuncArg->getType(), nullptr));
+    OriginalArgWasF128.push_back(originalTypeIsF128(FuncArg->getType()));
     OriginalArgWasFloat.push_back(FuncArg->getType()->isFloatingPointTy());
 
     // The MIPS vector ABI exhibits a corner case of sorts or quirk; if the

diff  --git a/llvm/lib/Target/Mips/MipsCCState.h b/llvm/lib/Target/Mips/MipsCCState.h
index 30b68e8a9c969..4d985518ce7c5 100644
--- a/llvm/lib/Target/Mips/MipsCCState.h
+++ b/llvm/lib/Target/Mips/MipsCCState.h
@@ -26,17 +26,11 @@ class MipsCCState : public CCState {
   getSpecialCallingConvForCallee(const SDNode *Callee,
                                  const MipsSubtarget &Subtarget);
 
-  /// This function returns true if CallSym is a long double emulation routine.
-  ///
-  /// FIXME: Changing the ABI based on the callee name is unsound. The lib func
-  /// address could be captured.
-  static bool isF128SoftLibCall(const char *CallSym);
-
-  static bool originalTypeIsF128(const Type *Ty, const char *Func);
+  static bool originalTypeIsF128(const Type *Ty);
   static bool originalEVTTypeIsVectorFloat(EVT Ty);
   static bool originalTypeIsVectorFloat(const Type *Ty);
 
-  void PreAnalyzeCallOperand(const Type *ArgTy, const char *Func);
+  void PreAnalyzeCallOperand(const Type *ArgTy);
 
   void PreAnalyzeFormalArgument(const Type *ArgTy, ISD::ArgFlagsTy Flags);
   void PreAnalyzeReturnValue(EVT ArgVT);
@@ -45,7 +39,7 @@ class MipsCCState : public CCState {
   /// Identify lowered values that originated from f128 arguments and record
   /// this for use by RetCC_MipsN.
   void PreAnalyzeCallResultForF128(const SmallVectorImpl<ISD::InputArg> &Ins,
-                                   const Type *RetTy, const char * Func);
+                                   const Type *RetTy);
 
   /// Identify lowered values that originated from f128 arguments and record
   /// this for use by RetCC_MipsN.
@@ -55,8 +49,7 @@ class MipsCCState : public CCState {
   /// this.
   void
   PreAnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
-                         std::vector<TargetLowering::ArgListEntry> &FuncArgs,
-                         const char *Func);
+                         std::vector<TargetLowering::ArgListEntry> &FuncArgs);
 
   /// Identify lowered values that originated from f128 arguments and record
   /// this for use by RetCC_MipsN.
@@ -96,21 +89,21 @@ class MipsCCState : public CCState {
               SpecialCallingConvType SpecialCC = NoSpecialCallingConv)
       : CCState(CC, isVarArg, MF, locs, C), SpecialCallingConv(SpecialCC) {}
 
-  void PreAnalyzeCallOperands(
-      const SmallVectorImpl<ISD::OutputArg> &Outs, CCAssignFn Fn,
-      std::vector<TargetLowering::ArgListEntry> &FuncArgs, const char *Func) {
+  void
+  PreAnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
+                         CCAssignFn Fn,
+                         std::vector<TargetLowering::ArgListEntry> &FuncArgs) {
     OriginalArgWasF128.clear();
     OriginalArgWasFloat.clear();
     OriginalArgWasFloatVector.clear();
-    PreAnalyzeCallOperands(Outs, FuncArgs, Func);
+    PreAnalyzeCallOperands(Outs, FuncArgs);
   }
 
   void
   AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
                       CCAssignFn Fn,
-                      std::vector<TargetLowering::ArgListEntry> &FuncArgs,
-                      const char *Func) {
-    PreAnalyzeCallOperands(Outs, Fn, FuncArgs, Func);
+                      std::vector<TargetLowering::ArgListEntry> &FuncArgs) {
+    PreAnalyzeCallOperands(Outs, Fn, FuncArgs);
     CCState::AnalyzeCallOperands(Outs, Fn);
   }
 
@@ -137,26 +130,24 @@ class MipsCCState : public CCState {
     CCState::AnalyzeFormalArguments(Ins, Fn);
   }
 
-  void PreAnalyzeCallResult(const Type *RetTy, const char *Func) {
-    OriginalArgWasF128.push_back(originalTypeIsF128(RetTy, Func));
+  void PreAnalyzeCallResult(const Type *RetTy) {
+    OriginalArgWasF128.push_back(originalTypeIsF128(RetTy));
     OriginalArgWasFloat.push_back(RetTy->isFloatingPointTy());
     OriginalRetWasFloatVector.push_back(originalTypeIsVectorFloat(RetTy));
   }
 
   void PreAnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
-                            CCAssignFn Fn, const Type *RetTy,
-                            const char *Func) {
+                            CCAssignFn Fn, const Type *RetTy) {
     OriginalArgWasFloat.clear();
     OriginalArgWasF128.clear();
     OriginalArgWasFloatVector.clear();
-    PreAnalyzeCallResultForF128(Ins, RetTy, Func);
+    PreAnalyzeCallResultForF128(Ins, RetTy);
     PreAnalyzeCallResultForVectorFloat(Ins, RetTy);
   }
 
   void AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
-                         CCAssignFn Fn, const Type *RetTy,
-                         const char *Func) {
-    PreAnalyzeCallResult(Ins, Fn, RetTy, Func);
+                         CCAssignFn Fn, const Type *RetTy) {
+    PreAnalyzeCallResult(Ins, Fn, RetTy);
     CCState::AnalyzeCallResult(Ins, Fn);
   }
 

diff  --git a/llvm/lib/Target/Mips/MipsCallLowering.cpp b/llvm/lib/Target/Mips/MipsCallLowering.cpp
index fa491086b0ac9..5b67346209731 100644
--- a/llvm/lib/Target/Mips/MipsCallLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsCallLowering.cpp
@@ -27,16 +27,11 @@ MipsCallLowering::MipsCallLowering(const MipsTargetLowering &TLI)
 
 namespace {
 struct MipsOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
-  /// This is the name of the function being called
-  /// FIXME: Relying on this is unsound
-  const char *Func = nullptr;
-
   /// Is this a return value, or an outgoing call operand.
   bool IsReturn;
 
-  MipsOutgoingValueAssigner(CCAssignFn *AssignFn_, const char *Func,
-                            bool IsReturn)
-      : OutgoingValueAssigner(AssignFn_), Func(Func), IsReturn(IsReturn) {}
+  MipsOutgoingValueAssigner(CCAssignFn *AssignFn_, bool IsReturn)
+      : OutgoingValueAssigner(AssignFn_), IsReturn(IsReturn) {}
 
   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                  CCValAssign::LocInfo LocInfo,
@@ -47,7 +42,7 @@ struct MipsOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
     if (IsReturn)
       State.PreAnalyzeReturnValue(EVT::getEVT(Info.Ty));
     else
-      State.PreAnalyzeCallOperand(Info.Ty, Func);
+      State.PreAnalyzeCallOperand(Info.Ty);
 
     return CallLowering::OutgoingValueAssigner::assignArg(
         ValNo, OrigVT, ValVT, LocVT, LocInfo, Info, Flags, State);
@@ -55,16 +50,11 @@ struct MipsOutgoingValueAssigner : public CallLowering::OutgoingValueAssigner {
 };
 
 struct MipsIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
-  /// This is the name of the function being called
-  /// FIXME: Relying on this is unsound
-  const char *Func = nullptr;
-
   /// Is this a call return value, or an incoming function argument.
   bool IsReturn;
 
-  MipsIncomingValueAssigner(CCAssignFn *AssignFn_, const char *Func,
-                            bool IsReturn)
-      : IncomingValueAssigner(AssignFn_), Func(Func), IsReturn(IsReturn) {}
+  MipsIncomingValueAssigner(CCAssignFn *AssignFn_, bool IsReturn)
+      : IncomingValueAssigner(AssignFn_), IsReturn(IsReturn) {}
 
   bool assignArg(unsigned ValNo, EVT OrigVT, MVT ValVT, MVT LocVT,
                  CCValAssign::LocInfo LocInfo,
@@ -73,7 +63,7 @@ struct MipsIncomingValueAssigner : public CallLowering::IncomingValueAssigner {
     MipsCCState &State = static_cast<MipsCCState &>(State_);
 
     if (IsReturn)
-      State.PreAnalyzeCallResult(Info.Ty, Func);
+      State.PreAnalyzeCallResult(Info.Ty);
     else
       State.PreAnalyzeFormalArgument(Info.Ty, Flags);
 
@@ -339,9 +329,8 @@ bool MipsCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
                        F.getContext());
 
     MipsOutgoingValueHandler RetHandler(MIRBuilder, MF.getRegInfo(), Ret);
-    std::string FuncName = F.getName().str();
     MipsOutgoingValueAssigner Assigner(TLI.CCAssignFnForReturn(),
-                                       FuncName.c_str(), /*IsReturn*/ true);
+                                       /*IsReturn*/ true);
 
     if (!determineAssignments(Assigner, RetInfos, CCInfo))
       return false;
@@ -392,8 +381,7 @@ bool MipsCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
   CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(F.getCallingConv()),
                        Align(1));
 
-  const std::string FuncName = F.getName().str();
-  MipsIncomingValueAssigner Assigner(TLI.CCAssignFnForCall(), FuncName.c_str(),
+  MipsIncomingValueAssigner Assigner(TLI.CCAssignFnForCall(),
                                      /*IsReturn*/ false);
   if (!determineAssignments(Assigner, ArgInfos, CCInfo))
     return false;
@@ -510,10 +498,7 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   CCInfo.AllocateStack(ABI.GetCalleeAllocdArgSizeInBytes(Info.CallConv),
                        Align(1));
 
-  const char *Call =
-      Info.Callee.isSymbol() ? Info.Callee.getSymbolName() : nullptr;
-
-  MipsOutgoingValueAssigner Assigner(TLI.CCAssignFnForCall(), Call,
+  MipsOutgoingValueAssigner Assigner(TLI.CCAssignFnForCall(),
                                      /*IsReturn*/ false);
   if (!determineAssignments(Assigner, ArgInfos, CCInfo))
     return false;
@@ -550,10 +535,8 @@ bool MipsCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
     CallLowering::splitToValueTypes(Info.OrigRet, ArgInfos, DL,
                                     F.getCallingConv());
 
-    const std::string FuncName = F.getName().str();
     SmallVector<CCValAssign, 8> ArgLocs;
     MipsIncomingValueAssigner Assigner(TLI.CCAssignFnForReturn(),
-                                       FuncName.c_str(),
                                        /*IsReturn*/ true);
     CallReturnHandler RetHandler(MIRBuilder, MF.getRegInfo(), MIB);
 

diff  --git a/llvm/lib/Target/Mips/MipsFastISel.cpp b/llvm/lib/Target/Mips/MipsFastISel.cpp
index a9ac0eae5dace..94fb3cc356819 100644
--- a/llvm/lib/Target/Mips/MipsFastISel.cpp
+++ b/llvm/lib/Target/Mips/MipsFastISel.cpp
@@ -1293,9 +1293,7 @@ bool MipsFastISel::finishCall(CallLoweringInfo &CLI, MVT RetVT,
     SmallVector<CCValAssign, 16> RVLocs;
     MipsCCState CCInfo(CC, false, *FuncInfo.MF, RVLocs, *Context);
 
-    CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips, CLI.RetTy,
-                             CLI.Symbol ? CLI.Symbol->getName().data()
-                                        : nullptr);
+    CCInfo.AnalyzeCallResult(CLI.Ins, RetCC_Mips, CLI.RetTy);
 
     // Only handle a single return value.
     if (RVLocs.size() != 1)

diff  --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index ed626f2d74788..466c13e78fbd5 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -3391,8 +3391,7 @@ MipsTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       MemcpyInByVal ? 0 : ABI.GetCalleeAllocdArgSizeInBytes(CallConv);
   CCInfo.AllocateStack(ReservedArgArea, Align(1));
 
-  CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs(),
-                             ES ? ES->getSymbol() : nullptr);
+  CCInfo.AnalyzeCallOperands(Outs, CC_Mips, CLI.getArgs());
 
   // Get a count of how many bytes are to be pushed on the stack.
   unsigned StackSize = CCInfo.getStackSize();
@@ -3687,10 +3686,7 @@ SDValue MipsTargetLowering::LowerCallResult(
   MipsCCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                      *DAG.getContext());
 
-  const ExternalSymbolSDNode *ES =
-      dyn_cast_or_null<const ExternalSymbolSDNode>(CLI.Callee.getNode());
-  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.RetTy,
-                           ES ? ES->getSymbol() : nullptr);
+  CCInfo.AnalyzeCallResult(Ins, RetCC_Mips, CLI.OrigRetTy);
 
   // Copy all of the result registers out of their specified physreg.
   for (unsigned i = 0; i != RVLocs.size(); ++i) {

diff  --git a/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll b/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll
index bbfb7cf9ca907..409b1a1f818ab 100644
--- a/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll
+++ b/llvm/test/CodeGen/Mips/fmuladd-soft-float.ll
@@ -49,13 +49,11 @@ define float @fmuladd_intrinsic_f32(float %a, float %b, float %c) #0 {
 ; SOFT-FLOAT-64-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 31, -8
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 16, -16
-; SOFT-FLOAT-64-NEXT:    move $16, $6
-; SOFT-FLOAT-64-NEXT:    sll $4, $4, 0
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $5, 0
-; SOFT-FLOAT-64-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64-NEXT:    move $16, $6
+; SOFT-FLOAT-64-NEXT:    move $4, $2
 ; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64-NEXT:    move $5, $16
 ; SOFT-FLOAT-64-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    jr $ra
@@ -69,13 +67,11 @@ define float @fmuladd_intrinsic_f32(float %a, float %b, float %c) #0 {
 ; SOFT-FLOAT-64R2-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 31, -8
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 16, -16
-; SOFT-FLOAT-64R2-NEXT:    move $16, $6
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $4, 0
 ; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $5, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64R2-NEXT:    move $16, $6
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64R2-NEXT:    move $5, $16
 ; SOFT-FLOAT-64R2-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    jr $ra
@@ -203,13 +199,11 @@ define float @fmuladd_contract_f32(float %a, float %b, float %c) #0 {
 ; SOFT-FLOAT-64-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 31, -8
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 16, -16
-; SOFT-FLOAT-64-NEXT:    move $16, $6
-; SOFT-FLOAT-64-NEXT:    sll $4, $4, 0
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $5, 0
-; SOFT-FLOAT-64-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64-NEXT:    move $16, $6
+; SOFT-FLOAT-64-NEXT:    move $4, $2
 ; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64-NEXT:    move $5, $16
 ; SOFT-FLOAT-64-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    jr $ra
@@ -223,13 +217,11 @@ define float @fmuladd_contract_f32(float %a, float %b, float %c) #0 {
 ; SOFT-FLOAT-64R2-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 31, -8
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 16, -16
-; SOFT-FLOAT-64R2-NEXT:    move $16, $6
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $4, 0
 ; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $5, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64R2-NEXT:    move $16, $6
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64R2-NEXT:    move $5, $16
 ; SOFT-FLOAT-64R2-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    ld $ra, 8($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    jr $ra
@@ -443,149 +435,169 @@ define <4 x float> @fmuladd_contract_v4f32(<4 x float> %a, <4 x float> %b, <4 x
 ;
 ; SOFT-FLOAT-64-LABEL: fmuladd_contract_v4f32:
 ; SOFT-FLOAT-64:       # %bb.0:
-; SOFT-FLOAT-64-NEXT:    daddiu $sp, $sp, -64
-; SOFT-FLOAT-64-NEXT:    .cfi_def_cfa_offset 64
-; SOFT-FLOAT-64-NEXT:    sd $ra, 56($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $22, 48($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $21, 40($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $20, 32($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $19, 24($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $18, 16($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $17, 8($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    daddiu $sp, $sp, -80
+; SOFT-FLOAT-64-NEXT:    .cfi_def_cfa_offset 80
+; SOFT-FLOAT-64-NEXT:    sd $ra, 72($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $23, 64($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $22, 56($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $21, 48($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $20, 40($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $19, 32($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $18, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $17, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64-NEXT:    sd $16, 8($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64-NEXT:    .cfi_offset 31, -8
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 22, -16
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 21, -24
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 20, -32
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 19, -40
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 18, -48
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 17, -56
-; SOFT-FLOAT-64-NEXT:    .cfi_offset 16, -64
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 23, -16
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 22, -24
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 21, -32
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 20, -40
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 19, -48
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 18, -56
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 17, -64
+; SOFT-FLOAT-64-NEXT:    .cfi_offset 16, -72
 ; SOFT-FLOAT-64-NEXT:    move $16, $9
-; SOFT-FLOAT-64-NEXT:    move $17, $8
-; SOFT-FLOAT-64-NEXT:    move $18, $7
-; SOFT-FLOAT-64-NEXT:    move $19, $6
-; SOFT-FLOAT-64-NEXT:    move $20, $5
+; SOFT-FLOAT-64-NEXT:    move $19, $8
+; SOFT-FLOAT-64-NEXT:    move $17, $7
+; SOFT-FLOAT-64-NEXT:    move $20, $6
+; SOFT-FLOAT-64-NEXT:    move $18, $5
 ; SOFT-FLOAT-64-NEXT:    move $21, $4
-; SOFT-FLOAT-64-NEXT:    sll $4, $4, 0
+; SOFT-FLOAT-64-NEXT:    sll $4, $21, 0
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $6, 0
+; SOFT-FLOAT-64-NEXT:    sll $5, $20, 0
+; SOFT-FLOAT-64-NEXT:    move $4, $2
+; SOFT-FLOAT-64-NEXT:    jal __addsf3
+; SOFT-FLOAT-64-NEXT:    sll $5, $19, 0
 ; SOFT-FLOAT-64-NEXT:    move $22, $2
-; SOFT-FLOAT-64-NEXT:    dsra $4, $21, 32
+; SOFT-FLOAT-64-NEXT:    sll $4, $18, 0
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    dsra $5, $19, 32
-; SOFT-FLOAT-64-NEXT:    sll $4, $2, 0
-; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    dsra $5, $17, 32
-; SOFT-FLOAT-64-NEXT:    # kill: def $v0 killed $v0 def $v0_64
-; SOFT-FLOAT-64-NEXT:    sll $4, $22, 0
 ; SOFT-FLOAT-64-NEXT:    sll $5, $17, 0
+; SOFT-FLOAT-64-NEXT:    move $23, $2
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $21, 32
+; SOFT-FLOAT-64-NEXT:    sll $4, $1, 0
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $20, 32
+; SOFT-FLOAT-64-NEXT:    jal __mulsf3
+; SOFT-FLOAT-64-NEXT:    sll $5, $1, 0
+; SOFT-FLOAT-64-NEXT:    move $4, $2
+; SOFT-FLOAT-64-NEXT:    dsll $1, $22, 32
+; SOFT-FLOAT-64-NEXT:    dsrl $2, $19, 32
+; SOFT-FLOAT-64-NEXT:    sll $5, $2, 0
 ; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    dsll $17, $2, 32
+; SOFT-FLOAT-64-NEXT:    dsrl $19, $1, 32
+; SOFT-FLOAT-64-NEXT:    # kill: def $v0 killed $v0 def $v0_64
 ; SOFT-FLOAT-64-NEXT:    dsll $1, $2, 32
-; SOFT-FLOAT-64-NEXT:    dsrl $1, $1, 32
-; SOFT-FLOAT-64-NEXT:    sll $4, $20, 0
-; SOFT-FLOAT-64-NEXT:    sll $5, $18, 0
-; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    or $17, $1, $17
-; SOFT-FLOAT-64-NEXT:    move $19, $2
-; SOFT-FLOAT-64-NEXT:    dsra $4, $20, 32
+; SOFT-FLOAT-64-NEXT:    sll $5, $16, 0
+; SOFT-FLOAT-64-NEXT:    or $19, $19, $1
+; SOFT-FLOAT-64-NEXT:    jal __addsf3
+; SOFT-FLOAT-64-NEXT:    move $4, $23
+; SOFT-FLOAT-64-NEXT:    move $20, $2
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $18, 32
+; SOFT-FLOAT-64-NEXT:    sll $4, $1, 0
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $17, 32
 ; SOFT-FLOAT-64-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64-NEXT:    dsra $5, $18, 32
-; SOFT-FLOAT-64-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64-NEXT:    sll $5, $1, 0
+; SOFT-FLOAT-64-NEXT:    move $4, $2
+; SOFT-FLOAT-64-NEXT:    dsll $1, $20, 32
+; SOFT-FLOAT-64-NEXT:    dsrl $17, $1, 32
+; SOFT-FLOAT-64-NEXT:    dsrl $1, $16, 32
 ; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    dsra $5, $16, 32
+; SOFT-FLOAT-64-NEXT:    sll $5, $1, 0
 ; SOFT-FLOAT-64-NEXT:    # kill: def $v0 killed $v0 def $v0_64
-; SOFT-FLOAT-64-NEXT:    dsll $18, $2, 32
-; SOFT-FLOAT-64-NEXT:    sll $4, $19, 0
-; SOFT-FLOAT-64-NEXT:    jal __addsf3
-; SOFT-FLOAT-64-NEXT:    sll $5, $16, 0
 ; SOFT-FLOAT-64-NEXT:    dsll $1, $2, 32
-; SOFT-FLOAT-64-NEXT:    dsrl $1, $1, 32
-; SOFT-FLOAT-64-NEXT:    or $3, $1, $18
-; SOFT-FLOAT-64-NEXT:    move $2, $17
-; SOFT-FLOAT-64-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $17, 8($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $18, 16($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $19, 24($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $20, 32($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $21, 40($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $22, 48($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64-NEXT:    ld $ra, 56($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    or $3, $17, $1
+; SOFT-FLOAT-64-NEXT:    move $2, $19
+; SOFT-FLOAT-64-NEXT:    ld $16, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $17, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $18, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $19, 32($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $20, 40($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $21, 48($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $22, 56($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $23, 64($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64-NEXT:    ld $ra, 72($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64-NEXT:    jr $ra
-; SOFT-FLOAT-64-NEXT:    daddiu $sp, $sp, 64
+; SOFT-FLOAT-64-NEXT:    daddiu $sp, $sp, 80
 ;
 ; SOFT-FLOAT-64R2-LABEL: fmuladd_contract_v4f32:
 ; SOFT-FLOAT-64R2:       # %bb.0:
-; SOFT-FLOAT-64R2-NEXT:    daddiu $sp, $sp, -64
-; SOFT-FLOAT-64R2-NEXT:    .cfi_def_cfa_offset 64
-; SOFT-FLOAT-64R2-NEXT:    sd $ra, 56($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $22, 48($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $21, 40($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $20, 32($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $19, 24($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $18, 16($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $17, 8($sp) # 8-byte Folded Spill
-; SOFT-FLOAT-64R2-NEXT:    sd $16, 0($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    daddiu $sp, $sp, -80
+; SOFT-FLOAT-64R2-NEXT:    .cfi_def_cfa_offset 80
+; SOFT-FLOAT-64R2-NEXT:    sd $ra, 72($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $23, 64($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $22, 56($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $21, 48($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $20, 40($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $19, 32($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $18, 24($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $17, 16($sp) # 8-byte Folded Spill
+; SOFT-FLOAT-64R2-NEXT:    sd $16, 8($sp) # 8-byte Folded Spill
 ; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 31, -8
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 22, -16
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 21, -24
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 20, -32
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 19, -40
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 18, -48
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 17, -56
-; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 16, -64
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 23, -16
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 22, -24
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 21, -32
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 20, -40
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 19, -48
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 18, -56
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 17, -64
+; SOFT-FLOAT-64R2-NEXT:    .cfi_offset 16, -72
 ; SOFT-FLOAT-64R2-NEXT:    move $16, $9
-; SOFT-FLOAT-64R2-NEXT:    move $17, $8
-; SOFT-FLOAT-64R2-NEXT:    move $18, $7
-; SOFT-FLOAT-64R2-NEXT:    move $19, $6
-; SOFT-FLOAT-64R2-NEXT:    move $20, $5
+; SOFT-FLOAT-64R2-NEXT:    move $19, $8
+; SOFT-FLOAT-64R2-NEXT:    move $17, $7
+; SOFT-FLOAT-64R2-NEXT:    move $20, $6
+; SOFT-FLOAT-64R2-NEXT:    move $18, $5
 ; SOFT-FLOAT-64R2-NEXT:    move $21, $4
-; SOFT-FLOAT-64R2-NEXT:    dsra $4, $4, 32
-; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    dsra $5, $6, 32
-; SOFT-FLOAT-64R2-NEXT:    move $22, $2
 ; SOFT-FLOAT-64R2-NEXT:    sll $4, $21, 0
 ; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $19, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $2, 0
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $20, 0
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $19, 0
+; SOFT-FLOAT-64R2-NEXT:    move $22, $2
+; SOFT-FLOAT-64R2-NEXT:    sll $4, $18, 0
+; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
 ; SOFT-FLOAT-64R2-NEXT:    sll $5, $17, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $22, 0
-; SOFT-FLOAT-64R2-NEXT:    dsra $5, $17, 32
+; SOFT-FLOAT-64R2-NEXT:    move $23, $2
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $21, 32
+; SOFT-FLOAT-64R2-NEXT:    sll $4, $1, 0
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $20, 32
+; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $1, 0
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $19, 32
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $1, 0
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
-; SOFT-FLOAT-64R2-NEXT:    dext $17, $2, 0, 32
+; SOFT-FLOAT-64R2-NEXT:    dext $19, $22, 0, 32
 ; SOFT-FLOAT-64R2-NEXT:    # kill: def $v0 killed $v0 def $v0_64
 ; SOFT-FLOAT-64R2-NEXT:    dsll $1, $2, 32
-; SOFT-FLOAT-64R2-NEXT:    dsra $4, $20, 32
-; SOFT-FLOAT-64R2-NEXT:    dsra $5, $18, 32
-; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    or $17, $17, $1
-; SOFT-FLOAT-64R2-NEXT:    move $19, $2
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $20, 0
-; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
-; SOFT-FLOAT-64R2-NEXT:    sll $5, $18, 0
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $2, 0
-; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
 ; SOFT-FLOAT-64R2-NEXT:    sll $5, $16, 0
-; SOFT-FLOAT-64R2-NEXT:    dext $18, $2, 0, 32
-; SOFT-FLOAT-64R2-NEXT:    sll $4, $19, 0
+; SOFT-FLOAT-64R2-NEXT:    or $19, $19, $1
+; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
+; SOFT-FLOAT-64R2-NEXT:    move $4, $23
+; SOFT-FLOAT-64R2-NEXT:    move $20, $2
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $18, 32
+; SOFT-FLOAT-64R2-NEXT:    sll $4, $1, 0
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $17, 32
+; SOFT-FLOAT-64R2-NEXT:    jal __mulsf3
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $1, 0
+; SOFT-FLOAT-64R2-NEXT:    move $4, $2
+; SOFT-FLOAT-64R2-NEXT:    dext $17, $20, 0, 32
+; SOFT-FLOAT-64R2-NEXT:    dsrl $1, $16, 32
 ; SOFT-FLOAT-64R2-NEXT:    jal __addsf3
-; SOFT-FLOAT-64R2-NEXT:    dsra $5, $16, 32
+; SOFT-FLOAT-64R2-NEXT:    sll $5, $1, 0
 ; SOFT-FLOAT-64R2-NEXT:    # kill: def $v0 killed $v0 def $v0_64
 ; SOFT-FLOAT-64R2-NEXT:    dsll $1, $2, 32
-; SOFT-FLOAT-64R2-NEXT:    or $3, $18, $1
-; SOFT-FLOAT-64R2-NEXT:    move $2, $17
-; SOFT-FLOAT-64R2-NEXT:    ld $16, 0($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $17, 8($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $18, 16($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $19, 24($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $20, 32($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $21, 40($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $22, 48($sp) # 8-byte Folded Reload
-; SOFT-FLOAT-64R2-NEXT:    ld $ra, 56($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    or $3, $17, $1
+; SOFT-FLOAT-64R2-NEXT:    move $2, $19
+; SOFT-FLOAT-64R2-NEXT:    ld $16, 8($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $17, 16($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $18, 24($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $19, 32($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $20, 40($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $21, 48($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $22, 56($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $23, 64($sp) # 8-byte Folded Reload
+; SOFT-FLOAT-64R2-NEXT:    ld $ra, 72($sp) # 8-byte Folded Reload
 ; SOFT-FLOAT-64R2-NEXT:    jr $ra
-; SOFT-FLOAT-64R2-NEXT:    daddiu $sp, $sp, 64
+; SOFT-FLOAT-64R2-NEXT:    daddiu $sp, $sp, 80
   %product = fmul contract <4 x float> %a, %b
   %result = fadd contract <4 x float> %product, %c
   ret <4 x float> %result


        


More information about the llvm-commits mailing list