[llvm] [llvm] Ensure that soft float targets don't use float/vector code for memops. (PR #107022)

Alex Rønne Petersen via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 2 14:53:38 PDT 2024


https://github.com/alexrp updated https://github.com/llvm/llvm-project/pull/107022

From ce5e25e8fa2e86c5b6e7899392088325603c1c57 Mon Sep 17 00:00:00 2001
From: Alex Rønne Petersen <alex at alexrp.com>
Date: Mon, 2 Sep 2024 21:11:18 +0200
Subject: [PATCH] [llvm] Ensure that soft float targets don't use float/vector
 code for memops.

TargetLowering::getOptimalMemOpType() and ::getOptimalMemOpLLT() now take an
argument indicating whether the caller is interested in float/vector types,
decided based on the use-soft-float and noimplicitfloat function attributes.
If an implementation returns a float or vector type when the caller is not
interested in those, the caller simply acts as if EVT::Other had been
returned. This way, a typical implementation does not need to worry about
getting these rarer cases right (which, in practice, some previously didn't).
An implementation can still use the new parameter in its own logic where that
is helpful; for example, x86 and PPC do so because, for integer scalars, they
want to handle misaligned operations differently from the target-agnostic
code's handling of EVT::Other.
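
As an illustration (a hypothetical sketch, not part of this patch: the target
name and the Subtarget query are placeholders), an override might use the new
parameter like this:

  EVT MyTargetLowering::getOptimalMemOpType(const MemOp &Op,
                                            const AttributeList &FuncAttributes,
                                            bool PreferIntScalar) const {
    // If the caller asked for integer scalars (soft float or
    // noimplicitfloat), skip the vector path entirely; a vector result
    // would be discarded by the caller and treated as EVT::Other anyway.
    if (!PreferIntScalar && Subtarget.hasVector() && Op.size() >= 16 &&
        Op.isAligned(Align(16)))
      return MVT::v2i64;
    // Defer to the generic target-independent lowering.
    return MVT::Other;
  }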

Note: The RISC-V backend still gets this very wrong, which is why I'm not
adding a test for it. The reason appears to be that it doesn't support these
attributes at all (yet?). Fixing that will require much deeper surgery, which
I might do in follow-up patches.

Closes #105978.
---
 llvm/include/llvm/CodeGen/TargetLowering.h    |  21 +-
 .../CodeGen/GlobalISel/LegalizerHelper.cpp    |  11 +-
 .../CodeGen/SelectionDAG/TargetLowering.cpp   |  12 +-
 .../Target/AArch64/AArch64ISelLowering.cpp    |  30 +-
 llvm/lib/Target/AArch64/AArch64ISelLowering.h |   8 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.cpp     |   5 +-
 llvm/lib/Target/AMDGPU/SIISelLowering.h       |   4 +-
 llvm/lib/Target/ARM/ARMISelLowering.cpp       |   9 +-
 llvm/lib/Target/ARM/ARMISelLowering.h         |   3 +-
 llvm/lib/Target/BPF/BPFISelLowering.h         |   4 +-
 .../Target/Hexagon/HexagonISelLowering.cpp    |   3 +-
 llvm/lib/Target/Hexagon/HexagonISelLowering.h |   4 +-
 llvm/lib/Target/Mips/MipsISelLowering.cpp     |   8 -
 llvm/lib/Target/Mips/MipsISelLowering.h       |   3 -
 llvm/lib/Target/PowerPC/PPCISelLowering.cpp   |  14 +-
 llvm/lib/Target/PowerPC/PPCISelLowering.h     |   3 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp   |   9 +-
 llvm/lib/Target/RISCV/RISCVISelLowering.h     |   4 +-
 .../Target/SystemZ/SystemZISelLowering.cpp    |   5 +-
 llvm/lib/Target/SystemZ/SystemZISelLowering.h |   4 +-
 llvm/lib/Target/X86/X86ISelLowering.h         |   3 +-
 llvm/lib/Target/X86/X86ISelLoweringCall.cpp   |   7 +-
 llvm/test/CodeGen/ARM/memop-soft-float.ll     | 128 +++++
 llvm/test/CodeGen/Mips/memop-soft-float.ll    | 234 +++++++++
 llvm/test/CodeGen/PowerPC/memop-soft-float.ll | 296 +++++++++++
 llvm/test/CodeGen/SPARC/memop-soft-float.ll   | 128 +++++
 llvm/test/CodeGen/SystemZ/memop-soft-float.ll |  51 ++
 llvm/test/CodeGen/X86/memop-soft-float.ll     | 464 ++++++++++++++++++
 28 files changed, 1389 insertions(+), 86 deletions(-)
 create mode 100644 llvm/test/CodeGen/ARM/memop-soft-float.ll
 create mode 100644 llvm/test/CodeGen/Mips/memop-soft-float.ll
 create mode 100644 llvm/test/CodeGen/PowerPC/memop-soft-float.ll
 create mode 100644 llvm/test/CodeGen/SPARC/memop-soft-float.ll
 create mode 100644 llvm/test/CodeGen/SystemZ/memop-soft-float.ll
 create mode 100644 llvm/test/CodeGen/X86/memop-soft-float.ll

diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index eda38cd8a564d6..6b56da2b9e3040 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -1957,19 +1957,22 @@ class TargetLoweringBase {
                           unsigned *Fast = nullptr) const;
 
   /// Returns the target specific optimal type for load and store operations as
-  /// a result of memset, memcpy, and memmove lowering.
-  /// It returns EVT::Other if the type should be determined using generic
-  /// target-independent logic.
-  virtual EVT
-  getOptimalMemOpType(const MemOp &Op,
-                      const AttributeList & /*FuncAttributes*/) const {
+  /// a result of memset, memcpy, and memmove lowering. It returns EVT::Other if
+  /// the type should be determined using generic target-independent logic.
+  ///
+  /// The PreferIntScalar parameter is a hint from the caller; if true *and* the
+  /// implementation returns a float or vector type, the caller will discard the
+  /// result and proceed as if EVT::Other had been returned.
+  virtual EVT getOptimalMemOpType(const MemOp &Op,
+                                  const AttributeList & /*FuncAttributes*/,
+                                  bool /*PreferIntScalar*/) const {
     return MVT::Other;
   }
 
   /// LLT returning variant.
-  virtual LLT
-  getOptimalMemOpLLT(const MemOp &Op,
-                     const AttributeList & /*FuncAttributes*/) const {
+  virtual LLT getOptimalMemOpLLT(const MemOp &Op,
+                                 const AttributeList & /*FuncAttributes*/,
+                                 bool /*PreferIntScalar*/) const {
     return LLT();
   }
 
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 3fece81df1f2fd..c3fe0c46f8d8d5 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -8846,9 +8846,14 @@ static bool findGISelOptimalMemOpLowering(std::vector<LLT> &MemOps,
   if (Op.isMemcpyWithFixedDstAlign() && Op.getSrcAlign() < Op.getDstAlign())
     return false;
 
-  LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes);
-
-  if (Ty == LLT()) {
+  bool WantIntScalar = TLI.useSoftFloat() ||
+                       FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
+  LLT Ty = TLI.getOptimalMemOpLLT(Op, FuncAttributes, WantIntScalar);
+
+  // The target may well report supporting vector operations to do the
+  // operation, but we don't want to use those as a matter of policy if we're
+  // using soft float or if implicit float operations are disallowed.
+  if (Ty == LLT() || (WantIntScalar && Ty.isVector())) {
     // Use the largest scalar type whose alignment constraints are satisfied.
     // We only need to check DstAlign here as SrcAlign is always greater or
     // equal to DstAlign (or zero).
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 4e796289cff0a1..4a5e52bc3a8307 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -203,9 +203,15 @@ bool TargetLowering::findOptimalMemOpLowering(
       Op.getSrcAlign() < Op.getDstAlign())
     return false;
 
-  EVT VT = getOptimalMemOpType(Op, FuncAttributes);
-
-  if (VT == MVT::Other) {
+  bool WantIntScalar =
+      useSoftFloat() || FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
+  EVT VT = getOptimalMemOpType(Op, FuncAttributes, WantIntScalar);
+
+  // The target may well report supporting float/vector operations to do the
+  // operation, but we don't want to use those as a matter of policy if we're
+  // using soft float or if implicit float operations are disallowed.
+  if (VT == MVT::Other ||
+      (WantIntScalar && (VT.isVector() || VT.isFloatingPoint()))) {
     // Use the largest integer type whose alignment constraints are satisfied.
     // We only need to check DstAlign here as SrcAlign is always greater or
     // equal to DstAlign (or zero).
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 5ac5b7f8a5ab18..cc9a01562bc779 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1188,7 +1188,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
           ISD::FSIN,              ISD::FCOS,           ISD::FTAN,
           ISD::FASIN,             ISD::FACOS,          ISD::FATAN,
           ISD::FSINH,             ISD::FCOSH,          ISD::FTANH,
-          ISD::FPOW,              ISD::FLOG,           ISD::FLOG2,          
+          ISD::FPOW,              ISD::FLOG,           ISD::FLOG2,
           ISD::FLOG10,            ISD::FEXP,           ISD::FEXP2,
           ISD::FEXP10,            ISD::FRINT,          ISD::FROUND,
           ISD::FROUNDEVEN,        ISD::FTRUNC,         ISD::FMINNUM,
@@ -1196,7 +1196,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
           ISD::STRICT_FADD,       ISD::STRICT_FSUB,    ISD::STRICT_FMUL,
           ISD::STRICT_FDIV,       ISD::STRICT_FMA,     ISD::STRICT_FCEIL,
           ISD::STRICT_FFLOOR,     ISD::STRICT_FSQRT,   ISD::STRICT_FRINT,
-          ISD::STRICT_FNEARBYINT, ISD::STRICT_FROUND,  ISD::STRICT_FTRUNC,  
+          ISD::STRICT_FNEARBYINT, ISD::STRICT_FROUND,  ISD::STRICT_FTRUNC,
           ISD::STRICT_FROUNDEVEN, ISD::STRICT_FMINNUM, ISD::STRICT_FMAXNUM,
           ISD::STRICT_FMINIMUM,   ISD::STRICT_FMAXIMUM})
       setOperationAction(Op, MVT::v1f64, Expand);
@@ -17657,10 +17657,10 @@ bool AArch64TargetLowering::lowerInterleaveIntrinsicToStore(
 }
 
 EVT AArch64TargetLowering::getOptimalMemOpType(
-    const MemOp &Op, const AttributeList &FuncAttributes) const {
-  bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
-  bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
-  bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
+    const MemOp &Op, const AttributeList &FuncAttributes,
+    bool PreferIntScalar) const {
+  bool CanUseNEON = Subtarget->hasNEON();
+  bool CanUseFP = Subtarget->hasFPARMv8();
   // Only use AdvSIMD to implement memset of 32-byte and above. It would have
   // taken one instruction to materialize the v2i64 zero and one store (with
   // restrictive addressing mode). Just do i64 stores.
@@ -17679,18 +17679,15 @@ EVT AArch64TargetLowering::getOptimalMemOpType(
     return MVT::v16i8;
   if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
     return MVT::f128;
-  if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
-    return MVT::i64;
-  if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
-    return MVT::i32;
+
   return MVT::Other;
 }
 
 LLT AArch64TargetLowering::getOptimalMemOpLLT(
-    const MemOp &Op, const AttributeList &FuncAttributes) const {
-  bool CanImplicitFloat = !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat);
-  bool CanUseNEON = Subtarget->hasNEON() && CanImplicitFloat;
-  bool CanUseFP = Subtarget->hasFPARMv8() && CanImplicitFloat;
+    const MemOp &Op, const AttributeList &FuncAttributes,
+    bool PreferIntScalar) const {
+  bool CanUseNEON = Subtarget->hasNEON();
+  bool CanUseFP = Subtarget->hasFPARMv8();
   // Only use AdvSIMD to implement memset of 32-byte and above. It would have
   // taken one instruction to materialize the v2i64 zero and one store (with
   // restrictive addressing mode). Just do i64 stores.
@@ -17709,10 +17706,7 @@ LLT AArch64TargetLowering::getOptimalMemOpLLT(
     return LLT::fixed_vector(2, 64);
   if (CanUseFP && !IsSmallMemset && AlignmentIsAcceptable(MVT::f128, Align(16)))
     return LLT::scalar(128);
-  if (Op.size() >= 8 && AlignmentIsAcceptable(MVT::i64, Align(8)))
-    return LLT::scalar(64);
-  if (Op.size() >= 4 && AlignmentIsAcceptable(MVT::i32, Align(4)))
-    return LLT::scalar(32);
+
   return LLT();
 }
 
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 39d5df0de0eec7..77d429be81056a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -720,11 +720,11 @@ class AArch64TargetLowering : public TargetLowering {
 
   bool shouldConsiderGEPOffsetSplit() const override;
 
-  EVT getOptimalMemOpType(const MemOp &Op,
-                          const AttributeList &FuncAttributes) const override;
+  EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
+                          bool PreferIntScalar) const override;
 
-  LLT getOptimalMemOpLLT(const MemOp &Op,
-                         const AttributeList &FuncAttributes) const override;
+  LLT getOptimalMemOpLLT(const MemOp &Op, const AttributeList &FuncAttributes,
+                         bool PreferIntScalar) const override;
 
   /// Return true if the addressing mode represented by AM is legal for this
   /// target, for a load/store of the specified type.
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 1437f3d58b5e79..a31b873b9c1ecc 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1861,8 +1861,9 @@ bool SITargetLowering::allowsMisalignedMemoryAccesses(
                                             Alignment, Flags, IsFast);
 }
 
-EVT SITargetLowering::getOptimalMemOpType(
-    const MemOp &Op, const AttributeList &FuncAttributes) const {
+EVT SITargetLowering::getOptimalMemOpType(const MemOp &Op,
+                                          const AttributeList &FuncAttributes,
+                                          bool PreferIntScalar) const {
   // FIXME: Should account for address space here.
 
   // The default fallback uses the private pointer size as a guess for a type to
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index eed4b3e79cdeee..fd58bd1f936286 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -344,8 +344,8 @@ class SITargetLowering final : public AMDGPUTargetLowering {
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       unsigned *IsFast = nullptr) const override;
 
-  EVT getOptimalMemOpType(const MemOp &Op,
-                          const AttributeList &FuncAttributes) const override;
+  EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
+                          bool PreferIntScalar) const override;
 
   bool isMemOpUniform(const SDNode *N) const;
   bool isMemOpHasNoClobberedMemOperand(const SDNode *N) const;
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 4ab0433069ae66..9288c9dc3b469b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -19189,12 +19189,11 @@ bool ARMTargetLowering::allowsMisalignedMemoryAccesses(EVT VT, unsigned,
   return false;
 }
 
-
-EVT ARMTargetLowering::getOptimalMemOpType(
-    const MemOp &Op, const AttributeList &FuncAttributes) const {
+EVT ARMTargetLowering::getOptimalMemOpType(const MemOp &Op,
+                                           const AttributeList &FuncAttributes,
+                                           bool PreferIntScalar) const {
   // See if we can use NEON instructions for this...
-  if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON() &&
-      !FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
+  if ((Op.isMemcpy() || Op.isZeroMemset()) && Subtarget->hasNEON()) {
     unsigned Fast;
     if (Op.size() >= 16 &&
         (Op.isAligned(Align(16)) ||
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.h b/llvm/lib/Target/ARM/ARMISelLowering.h
index a255e9b6fc365f..e8d8eab0c9a031 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.h
+++ b/llvm/lib/Target/ARM/ARMISelLowering.h
@@ -455,7 +455,8 @@ class VectorType;
                                         unsigned *Fast) const override;
 
     EVT getOptimalMemOpType(const MemOp &Op,
-                            const AttributeList &FuncAttributes) const override;
+                            const AttributeList &FuncAttributes,
+                            bool PreferIntScalar) const override;
 
     bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
     bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
diff --git a/llvm/lib/Target/BPF/BPFISelLowering.h b/llvm/lib/Target/BPF/BPFISelLowering.h
index 42707949e864cd..b4dec3593f2241 100644
--- a/llvm/lib/Target/BPF/BPFISelLowering.h
+++ b/llvm/lib/Target/BPF/BPFISelLowering.h
@@ -113,8 +113,8 @@ class BPFTargetLowering : public TargetLowering {
   void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                           SelectionDAG &DAG) const override;
 
-  EVT getOptimalMemOpType(const MemOp &Op,
-                          const AttributeList &FuncAttributes) const override {
+  EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
+                          bool PreferIntScalar) const override {
     return Op.size() >= 8 ? MVT::i64 : MVT::i32;
   }
 
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index 856c952e785dac..83023021d79d5a 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3783,7 +3783,8 @@ bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
 /// does not need to be loaded.  It returns EVT::Other if the type should be
 /// determined using generic target-independent logic.
 EVT HexagonTargetLowering::getOptimalMemOpType(
-    const MemOp &Op, const AttributeList &FuncAttributes) const {
+    const MemOp &Op, const AttributeList &FuncAttributes,
+    bool PreferIntScalar) const {
   if (Op.size() >= 8 && Op.isAligned(Align(8)))
     return MVT::i64;
   if (Op.size() >= 4 && Op.isAligned(Align(4)))
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.h b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
index 3fd961f5a74623..8a4840bf02236b 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.h
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.h
@@ -325,8 +325,8 @@ class HexagonTargetLowering : public TargetLowering {
   /// the immediate into a register.
   bool isLegalICmpImmediate(int64_t Imm) const override;
 
-  EVT getOptimalMemOpType(const MemOp &Op,
-                          const AttributeList &FuncAttributes) const override;
+  EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
+                          bool PreferIntScalar) const override;
 
   bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                           unsigned AddrSpace, Align Alignment,
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.cpp b/llvm/lib/Target/Mips/MipsISelLowering.cpp
index 0f2047fcac640e..4448a502ec4db3 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.cpp
+++ b/llvm/lib/Target/Mips/MipsISelLowering.cpp
@@ -4331,14 +4331,6 @@ MipsTargetLowering::isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
   return false;
 }
 
-EVT MipsTargetLowering::getOptimalMemOpType(
-    const MemOp &Op, const AttributeList &FuncAttributes) const {
-  if (Subtarget.hasMips64())
-    return MVT::i64;
-
-  return MVT::i32;
-}
-
 bool MipsTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
   if (VT != MVT::f32 && VT != MVT::f64)
diff --git a/llvm/lib/Target/Mips/MipsISelLowering.h b/llvm/lib/Target/Mips/MipsISelLowering.h
index 84ad40d6bbbe26..e2a077bd9104af 100644
--- a/llvm/lib/Target/Mips/MipsISelLowering.h
+++ b/llvm/lib/Target/Mips/MipsISelLowering.h
@@ -664,9 +664,6 @@ class TargetRegisterClass;
 
     bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;
 
-    EVT getOptimalMemOpType(const MemOp &Op,
-                            const AttributeList &FuncAttributes) const override;
-
     /// isFPImmLegal - Returns true if the target can instruction select the
     /// specified FP immediate natively. If false, the legalizer will
     /// materialize the FP immediate as a load from a constant pool.
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 459a96eca1ff20..03395b3d08d835 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -6512,7 +6512,7 @@ SDValue PPCTargetLowering::LowerCall_64SVR4(
         ArgOffset += PtrByteSize;
         continue;
       }
-      // Copy the object to parameter save area if it can not be entirely passed 
+      // Copy the object to parameter save area if it can not be entirely passed
       // by registers.
       // FIXME: we only need to copy the parts which need to be passed in
       // parameter save area. For the parts passed by registers, we don't need
@@ -7159,7 +7159,7 @@ static unsigned mapArgRegToOffsetAIX(unsigned Reg, const PPCFrameLowering *FL) {
 //
 //   Low Memory +--------------------------------------------+
 //   SP   +---> | Back chain                                 | ---+
-//        |     +--------------------------------------------+    |   
+//        |     +--------------------------------------------+    |
 //        |     | Saved Condition Register                   |    |
 //        |     +--------------------------------------------+    |
 //        |     | Saved Linkage Register                     |    |
@@ -8142,7 +8142,7 @@ SDValue PPCTargetLowering::LowerTRUNCATEVector(SDValue Op,
     return SDValue();
 
   SDValue N1 = Op.getOperand(0);
-  EVT SrcVT = N1.getValueType();  
+  EVT SrcVT = N1.getValueType();
   unsigned SrcSize = SrcVT.getSizeInBits();
   if (SrcSize > 256 || !isPowerOf2_32(SrcVT.getVectorNumElements()) ||
       !llvm::has_single_bit<uint32_t>(
@@ -17399,9 +17399,11 @@ bool PPCTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
 
 /// It returns EVT::Other if the type should be determined using generic
 /// target-independent logic.
-EVT PPCTargetLowering::getOptimalMemOpType(
-    const MemOp &Op, const AttributeList &FuncAttributes) const {
-  if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None) {
+EVT PPCTargetLowering::getOptimalMemOpType(const MemOp &Op,
+                                           const AttributeList &FuncAttributes,
+                                           bool PreferIntScalar) const {
+  if (getTargetMachine().getOptLevel() != CodeGenOptLevel::None &&
+      !PreferIntScalar) {
     // We should use Altivec/VSX loads and stores when available. For unaligned
     // addresses, unaligned VSX loads are only fast starting with the P8.
     if (Subtarget.hasAltivec() && Op.size() >= 16) {
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.h b/llvm/lib/Target/PowerPC/PPCISelLowering.h
index 0bdfdcd15441f4..978a2ed6a606de 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.h
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.h
@@ -1072,7 +1072,8 @@ namespace llvm {
     /// It returns EVT::Other if the type should be determined using generic
     /// target-independent logic.
     EVT getOptimalMemOpType(const MemOp &Op,
-                            const AttributeList &FuncAttributes) const override;
+                            const AttributeList &FuncAttributes,
+                            bool PreferIntScalar) const override;
 
     /// Is unaligned memory access allowed for the given type, and is it fast
     /// relative to software emulation.
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 670dee2edb1dfb..98d81beb168b2f 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -21045,15 +21045,12 @@ bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
   return Subtarget.enableUnalignedVectorMem();
 }
 
-
-EVT RISCVTargetLowering::getOptimalMemOpType(const MemOp &Op,
-                                             const AttributeList &FuncAttributes) const {
+EVT RISCVTargetLowering::getOptimalMemOpType(
+    const MemOp &Op, const AttributeList &FuncAttributes,
+    bool PreferIntScalar) const {
   if (!Subtarget.hasVInstructions())
     return MVT::Other;
 
-  if (FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat))
-    return MVT::Other;
-
   // We use LMUL1 memory operations here for a non-obvious reason.  Our caller
   // has an expansion threshold, and we want the number of hardware memory
   // operations to correspond roughly to that threshold.  LMUL>1 operations
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 1b91ab43a4637f..581a0964e8da01 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -782,8 +782,8 @@ class RISCVTargetLowering : public TargetLowering {
       MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
       unsigned *Fast = nullptr) const override;
 
-  EVT getOptimalMemOpType(const MemOp &Op,
-                          const AttributeList &FuncAttributes) const override;
+  EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
+                          bool PreferIntScalar) const override;
 
   bool splitValueIntoRegisterParts(
       SelectionDAG & DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
index 6f84bd6c6e4ff4..bcd7238b11471c 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.cpp
@@ -1132,8 +1132,9 @@ bool SystemZTargetLowering::findOptimalMemOpLowering(
                                                   SrcAS, FuncAttributes);
 }
 
-EVT SystemZTargetLowering::getOptimalMemOpType(const MemOp &Op,
-                                   const AttributeList &FuncAttributes) const {
+EVT SystemZTargetLowering::getOptimalMemOpType(
+    const MemOp &Op, const AttributeList &FuncAttributes,
+    bool PreferIntScalar) const {
   return Subtarget.hasVector() ? MVT::v2i64 : MVT::Other;
 }
 
diff --git a/llvm/lib/Target/SystemZ/SystemZISelLowering.h b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
index 1e7285e3e0fc53..1f512d027287df 100644
--- a/llvm/lib/Target/SystemZ/SystemZISelLowering.h
+++ b/llvm/lib/Target/SystemZ/SystemZISelLowering.h
@@ -493,8 +493,8 @@ class SystemZTargetLowering : public TargetLowering {
   findOptimalMemOpLowering(std::vector<EVT> &MemOps, unsigned Limit,
                            const MemOp &Op, unsigned DstAS, unsigned SrcAS,
                            const AttributeList &FuncAttributes) const override;
-  EVT getOptimalMemOpType(const MemOp &Op,
-                          const AttributeList &FuncAttributes) const override;
+  EVT getOptimalMemOpType(const MemOp &Op, const AttributeList &FuncAttributes,
+                          bool PreferIntScalar) const override;
   bool isTruncateFree(Type *, Type *) const override;
   bool isTruncateFree(EVT, EVT) const override;
 
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 93d2b3e65742b2..0f137a4f4a2e02 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -1057,7 +1057,8 @@ namespace llvm {
                                    const DataLayout &DL) const override;
 
     EVT getOptimalMemOpType(const MemOp &Op,
-                            const AttributeList &FuncAttributes) const override;
+                            const AttributeList &FuncAttributes,
+                            bool PreferIntScalar) const override;
 
     /// Returns true if it's safe to use load / store of the
     /// specified type to expand memcpy / memset inline. This is mostly true
diff --git a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
index ab1eeb4111ccdb..b66ca190fcee64 100644
--- a/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
+++ b/llvm/lib/Target/X86/X86ISelLoweringCall.cpp
@@ -279,9 +279,10 @@ uint64_t X86TargetLowering::getByValTypeAlignment(Type *Ty,
 /// target-independent logic.
 /// For vector ops we check that the overall size isn't larger than our
 /// preferred vector width.
-EVT X86TargetLowering::getOptimalMemOpType(
-    const MemOp &Op, const AttributeList &FuncAttributes) const {
-  if (!FuncAttributes.hasFnAttr(Attribute::NoImplicitFloat)) {
+EVT X86TargetLowering::getOptimalMemOpType(const MemOp &Op,
+                                           const AttributeList &FuncAttributes,
+                                           bool PreferIntScalar) const {
+  if (!PreferIntScalar) {
     if (Op.size() >= 16 &&
         (!Subtarget.isUnalignedMem16Slow() || Op.isAligned(Align(16)))) {
       // FIXME: Check if unaligned 64-byte accesses are slow.
diff --git a/llvm/test/CodeGen/ARM/memop-soft-float.ll b/llvm/test/CodeGen/ARM/memop-soft-float.ll
new file mode 100644
index 00000000000000..35718c28f26c2f
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/memop-soft-float.ll
@@ -0,0 +1,128 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=arm < %s | FileCheck %s -check-prefix ARM
+; RUN: llc -mtriple=arm -mattr +neon < %s | FileCheck %s -check-prefix ARM-NEON
+
+define void @memop_soft_float() "use-soft-float"="true" {
+; ARM-LABEL: memop_soft_float:
+; ARM:       @ %bb.0:
+; ARM-NEXT:    push {r4, lr}
+; ARM-NEXT:    mov r2, #0
+; ARM-NEXT:    mov lr, #0
+; ARM-NEXT:    ldm r2!, {r0, r1, r3, r4}
+; ARM-NEXT:    mov r12, #0
+; ARM-NEXT:    stm lr!, {r0, r1, r3, r4}
+; ARM-NEXT:    ldm r2, {r0, r1, r3, r4}
+; ARM-NEXT:    stm lr, {r0, r1, r3, r4}
+; ARM-NEXT:    mov r0, #28
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #24
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #20
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #16
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #12
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #8
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #4
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    str r12, [r12]
+; ARM-NEXT:    pop {r4, lr}
+; ARM-NEXT:    mov pc, lr
+;
+; ARM-NEON-LABEL: memop_soft_float:
+; ARM-NEON:       @ %bb.0:
+; ARM-NEON-NEXT:    push {r4, lr}
+; ARM-NEON-NEXT:    mov r2, #0
+; ARM-NEON-NEXT:    mov lr, #0
+; ARM-NEON-NEXT:    ldm r2!, {r0, r1, r3, r4}
+; ARM-NEON-NEXT:    mov r12, #0
+; ARM-NEON-NEXT:    stm lr!, {r0, r1, r3, r4}
+; ARM-NEON-NEXT:    ldm r2, {r0, r1, r3, r4}
+; ARM-NEON-NEXT:    stm lr, {r0, r1, r3, r4}
+; ARM-NEON-NEXT:    mov r0, #28
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #24
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #20
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #16
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #12
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #8
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #4
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    str r12, [r12]
+; ARM-NEON-NEXT:    pop {r4, lr}
+; ARM-NEON-NEXT:    mov pc, lr
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+define void @memop_noimplicitfloat() noimplicitfloat {
+; ARM-LABEL: memop_noimplicitfloat:
+; ARM:       @ %bb.0:
+; ARM-NEXT:    push {r4, lr}
+; ARM-NEXT:    mov r2, #0
+; ARM-NEXT:    mov lr, #0
+; ARM-NEXT:    ldm r2!, {r0, r1, r3, r4}
+; ARM-NEXT:    mov r12, #0
+; ARM-NEXT:    stm lr!, {r0, r1, r3, r4}
+; ARM-NEXT:    ldm r2, {r0, r1, r3, r4}
+; ARM-NEXT:    stm lr, {r0, r1, r3, r4}
+; ARM-NEXT:    mov r0, #28
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #24
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #20
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #16
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #12
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #8
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    mov r0, #4
+; ARM-NEXT:    str r12, [r0]
+; ARM-NEXT:    str r12, [r12]
+; ARM-NEXT:    pop {r4, lr}
+; ARM-NEXT:    mov pc, lr
+;
+; ARM-NEON-LABEL: memop_noimplicitfloat:
+; ARM-NEON:       @ %bb.0:
+; ARM-NEON-NEXT:    push {r4, lr}
+; ARM-NEON-NEXT:    mov r2, #0
+; ARM-NEON-NEXT:    mov lr, #0
+; ARM-NEON-NEXT:    ldm r2!, {r0, r1, r3, r4}
+; ARM-NEON-NEXT:    mov r12, #0
+; ARM-NEON-NEXT:    stm lr!, {r0, r1, r3, r4}
+; ARM-NEON-NEXT:    ldm r2, {r0, r1, r3, r4}
+; ARM-NEON-NEXT:    stm lr, {r0, r1, r3, r4}
+; ARM-NEON-NEXT:    mov r0, #28
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #24
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #20
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #16
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #12
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #8
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    mov r0, #4
+; ARM-NEON-NEXT:    str r12, [r0]
+; ARM-NEON-NEXT:    str r12, [r12]
+; ARM-NEON-NEXT:    pop {r4, lr}
+; ARM-NEON-NEXT:    mov pc, lr
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: write)
diff --git a/llvm/test/CodeGen/Mips/memop-soft-float.ll b/llvm/test/CodeGen/Mips/memop-soft-float.ll
new file mode 100644
index 00000000000000..55e088075dcad3
--- /dev/null
+++ b/llvm/test/CodeGen/Mips/memop-soft-float.ll
@@ -0,0 +1,234 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=mips < %s | FileCheck %s -check-prefix MIPS-32-BE
+; RUN: llc -mtriple=mipsel < %s | FileCheck %s -check-prefix MIPS-32-LE
+; RUN: llc -mtriple=mips64 < %s | FileCheck %s -check-prefix MIPS-64-BE
+; RUN: llc -mtriple=mips64el < %s | FileCheck %s -check-prefix MIPS-64-LE
+
+define void @memop_soft_float() "use-soft-float"="true" {
+; MIPS-32-BE-LABEL: memop_soft_float:
+; MIPS-32-BE:       # %bb.0:
+; MIPS-32-BE-NEXT:    addiu $1, $zero, 20
+; MIPS-32-BE-NEXT:    addiu $2, $zero, 16
+; MIPS-32-BE-NEXT:    addiu $3, $zero, 12
+; MIPS-32-BE-NEXT:    addiu $4, $zero, 8
+; MIPS-32-BE-NEXT:    lw $5, 0($4)
+; MIPS-32-BE-NEXT:    lw $6, 0($3)
+; MIPS-32-BE-NEXT:    lw $7, 0($2)
+; MIPS-32-BE-NEXT:    lw $8, 0($1)
+; MIPS-32-BE-NEXT:    addiu $9, $zero, 24
+; MIPS-32-BE-NEXT:    lw $10, 0($9)
+; MIPS-32-BE-NEXT:    addiu $11, $zero, 28
+; MIPS-32-BE-NEXT:    lw $12, 0($11)
+; MIPS-32-BE-NEXT:    sw $12, 0($11)
+; MIPS-32-BE-NEXT:    sw $10, 0($9)
+; MIPS-32-BE-NEXT:    sw $8, 0($1)
+; MIPS-32-BE-NEXT:    sw $7, 0($2)
+; MIPS-32-BE-NEXT:    sw $6, 0($3)
+; MIPS-32-BE-NEXT:    sw $5, 0($4)
+; MIPS-32-BE-NEXT:    addiu $5, $zero, 4
+; MIPS-32-BE-NEXT:    lw $6, 0($5)
+; MIPS-32-BE-NEXT:    sw $6, 0($5)
+; MIPS-32-BE-NEXT:    lw $6, 0($zero)
+; MIPS-32-BE-NEXT:    sw $6, 0($zero)
+; MIPS-32-BE-NEXT:    sw $zero, 0($11)
+; MIPS-32-BE-NEXT:    sw $zero, 0($9)
+; MIPS-32-BE-NEXT:    sw $zero, 0($1)
+; MIPS-32-BE-NEXT:    sw $zero, 0($2)
+; MIPS-32-BE-NEXT:    sw $zero, 0($3)
+; MIPS-32-BE-NEXT:    sw $zero, 0($4)
+; MIPS-32-BE-NEXT:    sw $zero, 0($5)
+; MIPS-32-BE-NEXT:    jr $ra
+; MIPS-32-BE-NEXT:    sw $zero, 0($zero)
+;
+; MIPS-32-LE-LABEL: memop_soft_float:
+; MIPS-32-LE:       # %bb.0:
+; MIPS-32-LE-NEXT:    addiu $1, $zero, 20
+; MIPS-32-LE-NEXT:    addiu $2, $zero, 16
+; MIPS-32-LE-NEXT:    addiu $3, $zero, 12
+; MIPS-32-LE-NEXT:    addiu $4, $zero, 8
+; MIPS-32-LE-NEXT:    lw $5, 0($4)
+; MIPS-32-LE-NEXT:    lw $6, 0($3)
+; MIPS-32-LE-NEXT:    lw $7, 0($2)
+; MIPS-32-LE-NEXT:    lw $8, 0($1)
+; MIPS-32-LE-NEXT:    addiu $9, $zero, 24
+; MIPS-32-LE-NEXT:    lw $10, 0($9)
+; MIPS-32-LE-NEXT:    addiu $11, $zero, 28
+; MIPS-32-LE-NEXT:    lw $12, 0($11)
+; MIPS-32-LE-NEXT:    sw $12, 0($11)
+; MIPS-32-LE-NEXT:    sw $10, 0($9)
+; MIPS-32-LE-NEXT:    sw $8, 0($1)
+; MIPS-32-LE-NEXT:    sw $7, 0($2)
+; MIPS-32-LE-NEXT:    sw $6, 0($3)
+; MIPS-32-LE-NEXT:    sw $5, 0($4)
+; MIPS-32-LE-NEXT:    addiu $5, $zero, 4
+; MIPS-32-LE-NEXT:    lw $6, 0($5)
+; MIPS-32-LE-NEXT:    sw $6, 0($5)
+; MIPS-32-LE-NEXT:    lw $6, 0($zero)
+; MIPS-32-LE-NEXT:    sw $6, 0($zero)
+; MIPS-32-LE-NEXT:    sw $zero, 0($11)
+; MIPS-32-LE-NEXT:    sw $zero, 0($9)
+; MIPS-32-LE-NEXT:    sw $zero, 0($1)
+; MIPS-32-LE-NEXT:    sw $zero, 0($2)
+; MIPS-32-LE-NEXT:    sw $zero, 0($3)
+; MIPS-32-LE-NEXT:    sw $zero, 0($4)
+; MIPS-32-LE-NEXT:    sw $zero, 0($5)
+; MIPS-32-LE-NEXT:    jr $ra
+; MIPS-32-LE-NEXT:    sw $zero, 0($zero)
+;
+; MIPS-64-BE-LABEL: memop_soft_float:
+; MIPS-64-BE:       # %bb.0:
+; MIPS-64-BE-NEXT:    daddiu $1, $zero, 16
+; MIPS-64-BE-NEXT:    ld $2, 0($1)
+; MIPS-64-BE-NEXT:    daddiu $3, $zero, 24
+; MIPS-64-BE-NEXT:    ld $4, 0($3)
+; MIPS-64-BE-NEXT:    sd $4, 0($3)
+; MIPS-64-BE-NEXT:    sd $2, 0($1)
+; MIPS-64-BE-NEXT:    daddiu $2, $zero, 8
+; MIPS-64-BE-NEXT:    ld $4, 0($2)
+; MIPS-64-BE-NEXT:    sd $4, 0($2)
+; MIPS-64-BE-NEXT:    ld $4, 0($zero)
+; MIPS-64-BE-NEXT:    sd $4, 0($zero)
+; MIPS-64-BE-NEXT:    sd $zero, 0($3)
+; MIPS-64-BE-NEXT:    sd $zero, 0($1)
+; MIPS-64-BE-NEXT:    sd $zero, 0($2)
+; MIPS-64-BE-NEXT:    jr $ra
+; MIPS-64-BE-NEXT:    sd $zero, 0($zero)
+;
+; MIPS-64-LE-LABEL: memop_soft_float:
+; MIPS-64-LE:       # %bb.0:
+; MIPS-64-LE-NEXT:    daddiu $1, $zero, 16
+; MIPS-64-LE-NEXT:    ld $2, 0($1)
+; MIPS-64-LE-NEXT:    daddiu $3, $zero, 24
+; MIPS-64-LE-NEXT:    ld $4, 0($3)
+; MIPS-64-LE-NEXT:    sd $4, 0($3)
+; MIPS-64-LE-NEXT:    sd $2, 0($1)
+; MIPS-64-LE-NEXT:    daddiu $2, $zero, 8
+; MIPS-64-LE-NEXT:    ld $4, 0($2)
+; MIPS-64-LE-NEXT:    sd $4, 0($2)
+; MIPS-64-LE-NEXT:    ld $4, 0($zero)
+; MIPS-64-LE-NEXT:    sd $4, 0($zero)
+; MIPS-64-LE-NEXT:    sd $zero, 0($3)
+; MIPS-64-LE-NEXT:    sd $zero, 0($1)
+; MIPS-64-LE-NEXT:    sd $zero, 0($2)
+; MIPS-64-LE-NEXT:    jr $ra
+; MIPS-64-LE-NEXT:    sd $zero, 0($zero)
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+define void @memop_noimplicitfloat() noimplicitfloat {
+; MIPS-32-BE-LABEL: memop_noimplicitfloat:
+; MIPS-32-BE:       # %bb.0:
+; MIPS-32-BE-NEXT:    addiu $1, $zero, 20
+; MIPS-32-BE-NEXT:    addiu $2, $zero, 16
+; MIPS-32-BE-NEXT:    addiu $3, $zero, 12
+; MIPS-32-BE-NEXT:    addiu $4, $zero, 8
+; MIPS-32-BE-NEXT:    lw $5, 0($4)
+; MIPS-32-BE-NEXT:    lw $6, 0($3)
+; MIPS-32-BE-NEXT:    lw $7, 0($2)
+; MIPS-32-BE-NEXT:    lw $8, 0($1)
+; MIPS-32-BE-NEXT:    addiu $9, $zero, 24
+; MIPS-32-BE-NEXT:    lw $10, 0($9)
+; MIPS-32-BE-NEXT:    addiu $11, $zero, 28
+; MIPS-32-BE-NEXT:    lw $12, 0($11)
+; MIPS-32-BE-NEXT:    sw $12, 0($11)
+; MIPS-32-BE-NEXT:    sw $10, 0($9)
+; MIPS-32-BE-NEXT:    sw $8, 0($1)
+; MIPS-32-BE-NEXT:    sw $7, 0($2)
+; MIPS-32-BE-NEXT:    sw $6, 0($3)
+; MIPS-32-BE-NEXT:    sw $5, 0($4)
+; MIPS-32-BE-NEXT:    addiu $5, $zero, 4
+; MIPS-32-BE-NEXT:    lw $6, 0($5)
+; MIPS-32-BE-NEXT:    sw $6, 0($5)
+; MIPS-32-BE-NEXT:    lw $6, 0($zero)
+; MIPS-32-BE-NEXT:    sw $6, 0($zero)
+; MIPS-32-BE-NEXT:    sw $zero, 0($11)
+; MIPS-32-BE-NEXT:    sw $zero, 0($9)
+; MIPS-32-BE-NEXT:    sw $zero, 0($1)
+; MIPS-32-BE-NEXT:    sw $zero, 0($2)
+; MIPS-32-BE-NEXT:    sw $zero, 0($3)
+; MIPS-32-BE-NEXT:    sw $zero, 0($4)
+; MIPS-32-BE-NEXT:    sw $zero, 0($5)
+; MIPS-32-BE-NEXT:    jr $ra
+; MIPS-32-BE-NEXT:    sw $zero, 0($zero)
+;
+; MIPS-32-LE-LABEL: memop_noimplicitfloat:
+; MIPS-32-LE:       # %bb.0:
+; MIPS-32-LE-NEXT:    addiu $1, $zero, 20
+; MIPS-32-LE-NEXT:    addiu $2, $zero, 16
+; MIPS-32-LE-NEXT:    addiu $3, $zero, 12
+; MIPS-32-LE-NEXT:    addiu $4, $zero, 8
+; MIPS-32-LE-NEXT:    lw $5, 0($4)
+; MIPS-32-LE-NEXT:    lw $6, 0($3)
+; MIPS-32-LE-NEXT:    lw $7, 0($2)
+; MIPS-32-LE-NEXT:    lw $8, 0($1)
+; MIPS-32-LE-NEXT:    addiu $9, $zero, 24
+; MIPS-32-LE-NEXT:    lw $10, 0($9)
+; MIPS-32-LE-NEXT:    addiu $11, $zero, 28
+; MIPS-32-LE-NEXT:    lw $12, 0($11)
+; MIPS-32-LE-NEXT:    sw $12, 0($11)
+; MIPS-32-LE-NEXT:    sw $10, 0($9)
+; MIPS-32-LE-NEXT:    sw $8, 0($1)
+; MIPS-32-LE-NEXT:    sw $7, 0($2)
+; MIPS-32-LE-NEXT:    sw $6, 0($3)
+; MIPS-32-LE-NEXT:    sw $5, 0($4)
+; MIPS-32-LE-NEXT:    addiu $5, $zero, 4
+; MIPS-32-LE-NEXT:    lw $6, 0($5)
+; MIPS-32-LE-NEXT:    sw $6, 0($5)
+; MIPS-32-LE-NEXT:    lw $6, 0($zero)
+; MIPS-32-LE-NEXT:    sw $6, 0($zero)
+; MIPS-32-LE-NEXT:    sw $zero, 0($11)
+; MIPS-32-LE-NEXT:    sw $zero, 0($9)
+; MIPS-32-LE-NEXT:    sw $zero, 0($1)
+; MIPS-32-LE-NEXT:    sw $zero, 0($2)
+; MIPS-32-LE-NEXT:    sw $zero, 0($3)
+; MIPS-32-LE-NEXT:    sw $zero, 0($4)
+; MIPS-32-LE-NEXT:    sw $zero, 0($5)
+; MIPS-32-LE-NEXT:    jr $ra
+; MIPS-32-LE-NEXT:    sw $zero, 0($zero)
+;
+; MIPS-64-BE-LABEL: memop_noimplicitfloat:
+; MIPS-64-BE:       # %bb.0:
+; MIPS-64-BE-NEXT:    daddiu $1, $zero, 16
+; MIPS-64-BE-NEXT:    ld $2, 0($1)
+; MIPS-64-BE-NEXT:    daddiu $3, $zero, 24
+; MIPS-64-BE-NEXT:    ld $4, 0($3)
+; MIPS-64-BE-NEXT:    sd $4, 0($3)
+; MIPS-64-BE-NEXT:    sd $2, 0($1)
+; MIPS-64-BE-NEXT:    daddiu $2, $zero, 8
+; MIPS-64-BE-NEXT:    ld $4, 0($2)
+; MIPS-64-BE-NEXT:    sd $4, 0($2)
+; MIPS-64-BE-NEXT:    ld $4, 0($zero)
+; MIPS-64-BE-NEXT:    sd $4, 0($zero)
+; MIPS-64-BE-NEXT:    sd $zero, 0($3)
+; MIPS-64-BE-NEXT:    sd $zero, 0($1)
+; MIPS-64-BE-NEXT:    sd $zero, 0($2)
+; MIPS-64-BE-NEXT:    jr $ra
+; MIPS-64-BE-NEXT:    sd $zero, 0($zero)
+;
+; MIPS-64-LE-LABEL: memop_noimplicitfloat:
+; MIPS-64-LE:       # %bb.0:
+; MIPS-64-LE-NEXT:    daddiu $1, $zero, 16
+; MIPS-64-LE-NEXT:    ld $2, 0($1)
+; MIPS-64-LE-NEXT:    daddiu $3, $zero, 24
+; MIPS-64-LE-NEXT:    ld $4, 0($3)
+; MIPS-64-LE-NEXT:    sd $4, 0($3)
+; MIPS-64-LE-NEXT:    sd $2, 0($1)
+; MIPS-64-LE-NEXT:    daddiu $2, $zero, 8
+; MIPS-64-LE-NEXT:    ld $4, 0($2)
+; MIPS-64-LE-NEXT:    sd $4, 0($2)
+; MIPS-64-LE-NEXT:    ld $4, 0($zero)
+; MIPS-64-LE-NEXT:    sd $4, 0($zero)
+; MIPS-64-LE-NEXT:    sd $zero, 0($3)
+; MIPS-64-LE-NEXT:    sd $zero, 0($1)
+; MIPS-64-LE-NEXT:    sd $zero, 0($2)
+; MIPS-64-LE-NEXT:    jr $ra
+; MIPS-64-LE-NEXT:    sd $zero, 0($zero)
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: write)
diff --git a/llvm/test/CodeGen/PowerPC/memop-soft-float.ll b/llvm/test/CodeGen/PowerPC/memop-soft-float.ll
new file mode 100644
index 00000000000000..a2f28c2d6a4f4a
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/memop-soft-float.ll
@@ -0,0 +1,296 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=powerpc < %s | FileCheck %s -check-prefix PPC-32
+; RUN: llc -mtriple=powerpc -mattr +altivec < %s | FileCheck %s -check-prefix PPC-32-ALTIVEC
+; RUN: llc -mtriple=powerpc -mattr +vsx < %s | FileCheck %s -check-prefix PPC-32-VSX
+; RUN: llc -mtriple=powerpc64 < %s | FileCheck %s -check-prefix PPC-64
+; RUN: llc -mtriple=powerpc64 -mattr +altivec < %s | FileCheck %s -check-prefix PPC-64-ALTIVEC
+; RUN: llc -mtriple=powerpc64 -mattr +vsx < %s | FileCheck %s -check-prefix PPC-64-VSX
+
+define void @memop_soft_float() "use-soft-float"="true" {
+; PPC-32-LABEL: memop_soft_float:
+; PPC-32:       # %bb.0:
+; PPC-32-NEXT:    lwz 3, 28(0)
+; PPC-32-NEXT:    stw 3, 28(0)
+; PPC-32-NEXT:    lwz 3, 24(0)
+; PPC-32-NEXT:    stw 3, 24(0)
+; PPC-32-NEXT:    lwz 3, 20(0)
+; PPC-32-NEXT:    stw 3, 20(0)
+; PPC-32-NEXT:    lwz 3, 16(0)
+; PPC-32-NEXT:    stw 3, 16(0)
+; PPC-32-NEXT:    lwz 3, 12(0)
+; PPC-32-NEXT:    stw 3, 12(0)
+; PPC-32-NEXT:    lwz 3, 8(0)
+; PPC-32-NEXT:    stw 3, 8(0)
+; PPC-32-NEXT:    lwz 3, 4(0)
+; PPC-32-NEXT:    stw 3, 4(0)
+; PPC-32-NEXT:    lwz 3, 0(0)
+; PPC-32-NEXT:    stw 3, 0(0)
+; PPC-32-NEXT:    li 3, 0
+; PPC-32-NEXT:    stw 3, 28(0)
+; PPC-32-NEXT:    stw 3, 24(0)
+; PPC-32-NEXT:    stw 3, 20(0)
+; PPC-32-NEXT:    stw 3, 16(0)
+; PPC-32-NEXT:    stw 3, 12(0)
+; PPC-32-NEXT:    stw 3, 8(0)
+; PPC-32-NEXT:    stw 3, 4(0)
+; PPC-32-NEXT:    stw 3, 0(0)
+; PPC-32-NEXT:    blr
+;
+; PPC-32-ALTIVEC-LABEL: memop_soft_float:
+; PPC-32-ALTIVEC:       # %bb.0:
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 28(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 28(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 24(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 24(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 20(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 20(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 16(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 16(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 12(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 12(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 8(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 8(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 4(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 4(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 0(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 0(0)
+; PPC-32-ALTIVEC-NEXT:    li 3, 0
+; PPC-32-ALTIVEC-NEXT:    stw 3, 28(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 24(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 20(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 16(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 12(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 8(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 4(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 0(0)
+; PPC-32-ALTIVEC-NEXT:    blr
+;
+; PPC-32-VSX-LABEL: memop_soft_float:
+; PPC-32-VSX:       # %bb.0:
+; PPC-32-VSX-NEXT:    lwz 3, 28(0)
+; PPC-32-VSX-NEXT:    stw 3, 28(0)
+; PPC-32-VSX-NEXT:    lwz 3, 24(0)
+; PPC-32-VSX-NEXT:    stw 3, 24(0)
+; PPC-32-VSX-NEXT:    lwz 3, 20(0)
+; PPC-32-VSX-NEXT:    stw 3, 20(0)
+; PPC-32-VSX-NEXT:    lwz 3, 16(0)
+; PPC-32-VSX-NEXT:    stw 3, 16(0)
+; PPC-32-VSX-NEXT:    lwz 3, 12(0)
+; PPC-32-VSX-NEXT:    stw 3, 12(0)
+; PPC-32-VSX-NEXT:    lwz 3, 8(0)
+; PPC-32-VSX-NEXT:    stw 3, 8(0)
+; PPC-32-VSX-NEXT:    lwz 3, 4(0)
+; PPC-32-VSX-NEXT:    stw 3, 4(0)
+; PPC-32-VSX-NEXT:    lwz 3, 0(0)
+; PPC-32-VSX-NEXT:    stw 3, 0(0)
+; PPC-32-VSX-NEXT:    li 3, 0
+; PPC-32-VSX-NEXT:    stw 3, 28(0)
+; PPC-32-VSX-NEXT:    stw 3, 24(0)
+; PPC-32-VSX-NEXT:    stw 3, 20(0)
+; PPC-32-VSX-NEXT:    stw 3, 16(0)
+; PPC-32-VSX-NEXT:    stw 3, 12(0)
+; PPC-32-VSX-NEXT:    stw 3, 8(0)
+; PPC-32-VSX-NEXT:    stw 3, 4(0)
+; PPC-32-VSX-NEXT:    stw 3, 0(0)
+; PPC-32-VSX-NEXT:    blr
+;
+; PPC-64-LABEL: memop_soft_float:
+; PPC-64:       # %bb.0:
+; PPC-64-NEXT:    ld 3, 24(0)
+; PPC-64-NEXT:    li 4, 0
+; PPC-64-NEXT:    std 3, 24(0)
+; PPC-64-NEXT:    ld 3, 16(0)
+; PPC-64-NEXT:    std 3, 16(0)
+; PPC-64-NEXT:    ld 3, 8(0)
+; PPC-64-NEXT:    std 3, 8(0)
+; PPC-64-NEXT:    ld 3, 0(0)
+; PPC-64-NEXT:    std 3, 0(0)
+; PPC-64-NEXT:    std 4, 24(0)
+; PPC-64-NEXT:    std 4, 16(0)
+; PPC-64-NEXT:    std 4, 8(0)
+; PPC-64-NEXT:    std 4, 0(0)
+; PPC-64-NEXT:    blr
+;
+; PPC-64-ALTIVEC-LABEL: memop_soft_float:
+; PPC-64-ALTIVEC:       # %bb.0:
+; PPC-64-ALTIVEC-NEXT:    ld 3, 24(0)
+; PPC-64-ALTIVEC-NEXT:    li 4, 0
+; PPC-64-ALTIVEC-NEXT:    std 3, 24(0)
+; PPC-64-ALTIVEC-NEXT:    ld 3, 16(0)
+; PPC-64-ALTIVEC-NEXT:    std 3, 16(0)
+; PPC-64-ALTIVEC-NEXT:    ld 3, 8(0)
+; PPC-64-ALTIVEC-NEXT:    std 3, 8(0)
+; PPC-64-ALTIVEC-NEXT:    ld 3, 0(0)
+; PPC-64-ALTIVEC-NEXT:    std 3, 0(0)
+; PPC-64-ALTIVEC-NEXT:    std 4, 24(0)
+; PPC-64-ALTIVEC-NEXT:    std 4, 16(0)
+; PPC-64-ALTIVEC-NEXT:    std 4, 8(0)
+; PPC-64-ALTIVEC-NEXT:    std 4, 0(0)
+; PPC-64-ALTIVEC-NEXT:    blr
+;
+; PPC-64-VSX-LABEL: memop_soft_float:
+; PPC-64-VSX:       # %bb.0:
+; PPC-64-VSX-NEXT:    ld 3, 24(0)
+; PPC-64-VSX-NEXT:    li 4, 0
+; PPC-64-VSX-NEXT:    std 3, 24(0)
+; PPC-64-VSX-NEXT:    ld 3, 16(0)
+; PPC-64-VSX-NEXT:    std 3, 16(0)
+; PPC-64-VSX-NEXT:    ld 3, 8(0)
+; PPC-64-VSX-NEXT:    std 3, 8(0)
+; PPC-64-VSX-NEXT:    ld 3, 0(0)
+; PPC-64-VSX-NEXT:    std 3, 0(0)
+; PPC-64-VSX-NEXT:    std 4, 24(0)
+; PPC-64-VSX-NEXT:    std 4, 16(0)
+; PPC-64-VSX-NEXT:    std 4, 8(0)
+; PPC-64-VSX-NEXT:    std 4, 0(0)
+; PPC-64-VSX-NEXT:    blr
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+define void @memop_noimplicitfloat() noimplicitfloat {
+; PPC-32-LABEL: memop_noimplicitfloat:
+; PPC-32:       # %bb.0:
+; PPC-32-NEXT:    lwz 3, 28(0)
+; PPC-32-NEXT:    stw 3, 28(0)
+; PPC-32-NEXT:    lwz 3, 24(0)
+; PPC-32-NEXT:    stw 3, 24(0)
+; PPC-32-NEXT:    lwz 3, 20(0)
+; PPC-32-NEXT:    stw 3, 20(0)
+; PPC-32-NEXT:    lwz 3, 16(0)
+; PPC-32-NEXT:    stw 3, 16(0)
+; PPC-32-NEXT:    lwz 3, 12(0)
+; PPC-32-NEXT:    stw 3, 12(0)
+; PPC-32-NEXT:    lwz 3, 8(0)
+; PPC-32-NEXT:    stw 3, 8(0)
+; PPC-32-NEXT:    lwz 3, 4(0)
+; PPC-32-NEXT:    stw 3, 4(0)
+; PPC-32-NEXT:    lwz 3, 0(0)
+; PPC-32-NEXT:    stw 3, 0(0)
+; PPC-32-NEXT:    li 3, 0
+; PPC-32-NEXT:    stw 3, 28(0)
+; PPC-32-NEXT:    stw 3, 24(0)
+; PPC-32-NEXT:    stw 3, 20(0)
+; PPC-32-NEXT:    stw 3, 16(0)
+; PPC-32-NEXT:    stw 3, 12(0)
+; PPC-32-NEXT:    stw 3, 8(0)
+; PPC-32-NEXT:    stw 3, 4(0)
+; PPC-32-NEXT:    stw 3, 0(0)
+; PPC-32-NEXT:    blr
+;
+; PPC-32-ALTIVEC-LABEL: memop_noimplicitfloat:
+; PPC-32-ALTIVEC:       # %bb.0:
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 28(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 28(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 24(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 24(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 20(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 20(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 16(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 16(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 12(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 12(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 8(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 8(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 4(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 4(0)
+; PPC-32-ALTIVEC-NEXT:    lwz 3, 0(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 0(0)
+; PPC-32-ALTIVEC-NEXT:    li 3, 0
+; PPC-32-ALTIVEC-NEXT:    stw 3, 28(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 24(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 20(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 16(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 12(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 8(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 4(0)
+; PPC-32-ALTIVEC-NEXT:    stw 3, 0(0)
+; PPC-32-ALTIVEC-NEXT:    blr
+;
+; PPC-32-VSX-LABEL: memop_noimplicitfloat:
+; PPC-32-VSX:       # %bb.0:
+; PPC-32-VSX-NEXT:    lwz 3, 28(0)
+; PPC-32-VSX-NEXT:    stw 3, 28(0)
+; PPC-32-VSX-NEXT:    lwz 3, 24(0)
+; PPC-32-VSX-NEXT:    stw 3, 24(0)
+; PPC-32-VSX-NEXT:    lwz 3, 20(0)
+; PPC-32-VSX-NEXT:    stw 3, 20(0)
+; PPC-32-VSX-NEXT:    lwz 3, 16(0)
+; PPC-32-VSX-NEXT:    stw 3, 16(0)
+; PPC-32-VSX-NEXT:    lwz 3, 12(0)
+; PPC-32-VSX-NEXT:    stw 3, 12(0)
+; PPC-32-VSX-NEXT:    lwz 3, 8(0)
+; PPC-32-VSX-NEXT:    stw 3, 8(0)
+; PPC-32-VSX-NEXT:    lwz 3, 4(0)
+; PPC-32-VSX-NEXT:    stw 3, 4(0)
+; PPC-32-VSX-NEXT:    lwz 3, 0(0)
+; PPC-32-VSX-NEXT:    stw 3, 0(0)
+; PPC-32-VSX-NEXT:    li 3, 0
+; PPC-32-VSX-NEXT:    stw 3, 28(0)
+; PPC-32-VSX-NEXT:    stw 3, 24(0)
+; PPC-32-VSX-NEXT:    stw 3, 20(0)
+; PPC-32-VSX-NEXT:    stw 3, 16(0)
+; PPC-32-VSX-NEXT:    stw 3, 12(0)
+; PPC-32-VSX-NEXT:    stw 3, 8(0)
+; PPC-32-VSX-NEXT:    stw 3, 4(0)
+; PPC-32-VSX-NEXT:    stw 3, 0(0)
+; PPC-32-VSX-NEXT:    blr
+;
+; PPC-64-LABEL: memop_noimplicitfloat:
+; PPC-64:       # %bb.0:
+; PPC-64-NEXT:    ld 3, 24(0)
+; PPC-64-NEXT:    li 4, 0
+; PPC-64-NEXT:    std 3, 24(0)
+; PPC-64-NEXT:    ld 3, 16(0)
+; PPC-64-NEXT:    std 3, 16(0)
+; PPC-64-NEXT:    ld 3, 8(0)
+; PPC-64-NEXT:    std 3, 8(0)
+; PPC-64-NEXT:    ld 3, 0(0)
+; PPC-64-NEXT:    std 3, 0(0)
+; PPC-64-NEXT:    std 4, 24(0)
+; PPC-64-NEXT:    std 4, 16(0)
+; PPC-64-NEXT:    std 4, 8(0)
+; PPC-64-NEXT:    std 4, 0(0)
+; PPC-64-NEXT:    blr
+;
+; PPC-64-ALTIVEC-LABEL: memop_noimplicitfloat:
+; PPC-64-ALTIVEC:       # %bb.0:
+; PPC-64-ALTIVEC-NEXT:    ld 3, 24(0)
+; PPC-64-ALTIVEC-NEXT:    li 4, 0
+; PPC-64-ALTIVEC-NEXT:    std 3, 24(0)
+; PPC-64-ALTIVEC-NEXT:    ld 3, 16(0)
+; PPC-64-ALTIVEC-NEXT:    std 3, 16(0)
+; PPC-64-ALTIVEC-NEXT:    ld 3, 8(0)
+; PPC-64-ALTIVEC-NEXT:    std 3, 8(0)
+; PPC-64-ALTIVEC-NEXT:    ld 3, 0(0)
+; PPC-64-ALTIVEC-NEXT:    std 3, 0(0)
+; PPC-64-ALTIVEC-NEXT:    std 4, 24(0)
+; PPC-64-ALTIVEC-NEXT:    std 4, 16(0)
+; PPC-64-ALTIVEC-NEXT:    std 4, 8(0)
+; PPC-64-ALTIVEC-NEXT:    std 4, 0(0)
+; PPC-64-ALTIVEC-NEXT:    blr
+;
+; PPC-64-VSX-LABEL: memop_noimplicitfloat:
+; PPC-64-VSX:       # %bb.0:
+; PPC-64-VSX-NEXT:    ld 3, 24(0)
+; PPC-64-VSX-NEXT:    li 4, 0
+; PPC-64-VSX-NEXT:    std 3, 24(0)
+; PPC-64-VSX-NEXT:    ld 3, 16(0)
+; PPC-64-VSX-NEXT:    std 3, 16(0)
+; PPC-64-VSX-NEXT:    ld 3, 8(0)
+; PPC-64-VSX-NEXT:    std 3, 8(0)
+; PPC-64-VSX-NEXT:    ld 3, 0(0)
+; PPC-64-VSX-NEXT:    std 3, 0(0)
+; PPC-64-VSX-NEXT:    std 4, 24(0)
+; PPC-64-VSX-NEXT:    std 4, 16(0)
+; PPC-64-VSX-NEXT:    std 4, 8(0)
+; PPC-64-VSX-NEXT:    std 4, 0(0)
+; PPC-64-VSX-NEXT:    blr
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: write)
diff --git a/llvm/test/CodeGen/SPARC/memop-soft-float.ll b/llvm/test/CodeGen/SPARC/memop-soft-float.ll
new file mode 100644
index 00000000000000..2765bfde1eee07
--- /dev/null
+++ b/llvm/test/CodeGen/SPARC/memop-soft-float.ll
@@ -0,0 +1,128 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=sparc < %s | FileCheck %s -check-prefix SPARC-32
+; RUN: llc -mtriple=sparc64 < %s | FileCheck %s -check-prefix SPARC-64
+
+define void @memop_soft_float() "use-soft-float"="true" {
+; SPARC-32-LABEL: memop_soft_float:
+; SPARC-32:         .cfi_startproc
+; SPARC-32-NEXT:  ! %bb.0:
+; SPARC-32-NEXT:    mov 28, %o0
+; SPARC-32-NEXT:    ld [%o0], %o1
+; SPARC-32-NEXT:    st %o1, [%o0]
+; SPARC-32-NEXT:    mov 24, %o1
+; SPARC-32-NEXT:    ld [%o1], %o2
+; SPARC-32-NEXT:    st %o2, [%o1]
+; SPARC-32-NEXT:    mov 20, %o2
+; SPARC-32-NEXT:    ld [%o2], %o3
+; SPARC-32-NEXT:    st %o3, [%o2]
+; SPARC-32-NEXT:    mov 16, %o3
+; SPARC-32-NEXT:    ld [%o3], %o4
+; SPARC-32-NEXT:    st %o4, [%o3]
+; SPARC-32-NEXT:    mov 12, %o4
+; SPARC-32-NEXT:    ld [%o4], %o5
+; SPARC-32-NEXT:    st %o5, [%o4]
+; SPARC-32-NEXT:    mov 8, %o5
+; SPARC-32-NEXT:    ld [%o5], %g2
+; SPARC-32-NEXT:    st %g2, [%o5]
+; SPARC-32-NEXT:    mov 4, %g2
+; SPARC-32-NEXT:    ld [%g2], %g3
+; SPARC-32-NEXT:    st %g3, [%g2]
+; SPARC-32-NEXT:    ld [%g0], %g3
+; SPARC-32-NEXT:    st %g3, [%g0]
+; SPARC-32-NEXT:    st %g0, [%o0]
+; SPARC-32-NEXT:    st %g0, [%o1]
+; SPARC-32-NEXT:    st %g0, [%o2]
+; SPARC-32-NEXT:    st %g0, [%o3]
+; SPARC-32-NEXT:    st %g0, [%o4]
+; SPARC-32-NEXT:    st %g0, [%o5]
+; SPARC-32-NEXT:    st %g0, [%g2]
+; SPARC-32-NEXT:    retl
+; SPARC-32-NEXT:    st %g0, [%g0]
+;
+; SPARC-64-LABEL: memop_soft_float:
+; SPARC-64:         .cfi_startproc
+; SPARC-64-NEXT:  ! %bb.0:
+; SPARC-64-NEXT:    mov 24, %o0
+; SPARC-64-NEXT:    ldx [%o0], %o1
+; SPARC-64-NEXT:    stx %o1, [%o0]
+; SPARC-64-NEXT:    mov 16, %o1
+; SPARC-64-NEXT:    ldx [%o1], %o2
+; SPARC-64-NEXT:    stx %o2, [%o1]
+; SPARC-64-NEXT:    mov 8, %o2
+; SPARC-64-NEXT:    ldx [%o2], %o3
+; SPARC-64-NEXT:    stx %o3, [%o2]
+; SPARC-64-NEXT:    ldx [%g0], %o3
+; SPARC-64-NEXT:    stx %o3, [%g0]
+; SPARC-64-NEXT:    stx %g0, [%o0]
+; SPARC-64-NEXT:    stx %g0, [%o1]
+; SPARC-64-NEXT:    stx %g0, [%o2]
+; SPARC-64-NEXT:    retl
+; SPARC-64-NEXT:    stx %g0, [%g0]
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+define void @memop_noimplicitfloat() noimplicitfloat {
+; SPARC-32-LABEL: memop_noimplicitfloat:
+; SPARC-32:         .cfi_startproc
+; SPARC-32-NEXT:  ! %bb.0:
+; SPARC-32-NEXT:    mov 28, %o0
+; SPARC-32-NEXT:    ld [%o0], %o1
+; SPARC-32-NEXT:    st %o1, [%o0]
+; SPARC-32-NEXT:    mov 24, %o1
+; SPARC-32-NEXT:    ld [%o1], %o2
+; SPARC-32-NEXT:    st %o2, [%o1]
+; SPARC-32-NEXT:    mov 20, %o2
+; SPARC-32-NEXT:    ld [%o2], %o3
+; SPARC-32-NEXT:    st %o3, [%o2]
+; SPARC-32-NEXT:    mov 16, %o3
+; SPARC-32-NEXT:    ld [%o3], %o4
+; SPARC-32-NEXT:    st %o4, [%o3]
+; SPARC-32-NEXT:    mov 12, %o4
+; SPARC-32-NEXT:    ld [%o4], %o5
+; SPARC-32-NEXT:    st %o5, [%o4]
+; SPARC-32-NEXT:    mov 8, %o5
+; SPARC-32-NEXT:    ld [%o5], %g2
+; SPARC-32-NEXT:    st %g2, [%o5]
+; SPARC-32-NEXT:    mov 4, %g2
+; SPARC-32-NEXT:    ld [%g2], %g3
+; SPARC-32-NEXT:    st %g3, [%g2]
+; SPARC-32-NEXT:    ld [%g0], %g3
+; SPARC-32-NEXT:    st %g3, [%g0]
+; SPARC-32-NEXT:    st %g0, [%o0]
+; SPARC-32-NEXT:    st %g0, [%o1]
+; SPARC-32-NEXT:    st %g0, [%o2]
+; SPARC-32-NEXT:    st %g0, [%o3]
+; SPARC-32-NEXT:    st %g0, [%o4]
+; SPARC-32-NEXT:    st %g0, [%o5]
+; SPARC-32-NEXT:    st %g0, [%g2]
+; SPARC-32-NEXT:    retl
+; SPARC-32-NEXT:    st %g0, [%g0]
+;
+; SPARC-64-LABEL: memop_noimplicitfloat:
+; SPARC-64:         .cfi_startproc
+; SPARC-64-NEXT:  ! %bb.0:
+; SPARC-64-NEXT:    mov 24, %o0
+; SPARC-64-NEXT:    ldx [%o0], %o1
+; SPARC-64-NEXT:    stx %o1, [%o0]
+; SPARC-64-NEXT:    mov 16, %o1
+; SPARC-64-NEXT:    ldx [%o1], %o2
+; SPARC-64-NEXT:    stx %o2, [%o1]
+; SPARC-64-NEXT:    mov 8, %o2
+; SPARC-64-NEXT:    ldx [%o2], %o3
+; SPARC-64-NEXT:    stx %o3, [%o2]
+; SPARC-64-NEXT:    ldx [%g0], %o3
+; SPARC-64-NEXT:    stx %o3, [%g0]
+; SPARC-64-NEXT:    stx %g0, [%o0]
+; SPARC-64-NEXT:    stx %g0, [%o1]
+; SPARC-64-NEXT:    stx %g0, [%o2]
+; SPARC-64-NEXT:    retl
+; SPARC-64-NEXT:    stx %g0, [%g0]
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: write)
diff --git a/llvm/test/CodeGen/SystemZ/memop-soft-float.ll b/llvm/test/CodeGen/SystemZ/memop-soft-float.ll
new file mode 100644
index 00000000000000..7080dfdf87eea3
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/memop-soft-float.ll
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=s390x < %s | FileCheck %s -check-prefix S390X
+
+define void @memop_soft_float() "use-soft-float"="true" {
+; S390X-LABEL: memop_soft_float:
+; S390X:       # %bb.0:
+; S390X-NEXT:    stmg %r14, %r15, 112(%r15)
+; S390X-NEXT:    .cfi_offset %r14, -48
+; S390X-NEXT:    .cfi_offset %r15, -40
+; S390X-NEXT:    aghi %r15, -160
+; S390X-NEXT:    .cfi_def_cfa_offset 320
+; S390X-NEXT:    lghi %r2, 0
+; S390X-NEXT:    lghi %r3, 0
+; S390X-NEXT:    lghi %r4, 32
+; S390X-NEXT:    brasl %r14, memcpy@PLT
+; S390X-NEXT:    lghi %r2, 0
+; S390X-NEXT:    lhi %r3, 0
+; S390X-NEXT:    lghi %r4, 32
+; S390X-NEXT:    brasl %r14, memset@PLT
+; S390X-NEXT:    lmg %r14, %r15, 272(%r15)
+; S390X-NEXT:    br %r14
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+define void @memop_noimplicitfloat() noimplicitfloat {
+; S390X-LABEL: memop_noimplicitfloat:
+; S390X:       # %bb.0:
+; S390X-NEXT:    stmg %r14, %r15, 112(%r15)
+; S390X-NEXT:    .cfi_offset %r14, -48
+; S390X-NEXT:    .cfi_offset %r15, -40
+; S390X-NEXT:    aghi %r15, -160
+; S390X-NEXT:    .cfi_def_cfa_offset 320
+; S390X-NEXT:    lghi %r2, 0
+; S390X-NEXT:    lghi %r3, 0
+; S390X-NEXT:    lghi %r4, 32
+; S390X-NEXT:    brasl %r14, memcpy@PLT
+; S390X-NEXT:    lghi %r2, 0
+; S390X-NEXT:    lhi %r3, 0
+; S390X-NEXT:    lghi %r4, 32
+; S390X-NEXT:    brasl %r14, memset@PLT
+; S390X-NEXT:    lmg %r14, %r15, 272(%r15)
+; S390X-NEXT:    br %r14
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: write)
diff --git a/llvm/test/CodeGen/X86/memop-soft-float.ll b/llvm/test/CodeGen/X86/memop-soft-float.ll
new file mode 100644
index 00000000000000..82df71eed477ea
--- /dev/null
+++ b/llvm/test/CodeGen/X86/memop-soft-float.ll
@@ -0,0 +1,464 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=i386 < %s | FileCheck %s -check-prefix X86-32
+; RUN: llc -mtriple=i386 -mattr +sse < %s | FileCheck %s -check-prefix X86-32-SSE1
+; RUN: llc -mtriple=i386 -mattr +sse2 < %s | FileCheck %s -check-prefix X86-32-SSE2
+; RUN: llc -mtriple=i386 -mattr +avx < %s | FileCheck %s -check-prefix X86-32-AVX
+; RUN: llc -mtriple=i386 -mattr +avx512f,+evex512 < %s | FileCheck %s -check-prefix X86-32-AVX512
+; RUN: llc -mtriple=x86_64 -mattr -sse2,-sse < %s | FileCheck %s -check-prefix X86-64
+; RUN: llc -mtriple=x86_64 -mattr -sse2,+sse < %s | FileCheck %s -check-prefix X86-64-SSE1
+; RUN: llc -mtriple=x86_64 < %s | FileCheck %s -check-prefix X86-64-SSE2
+; RUN: llc -mtriple=x86_64 -mattr +avx < %s | FileCheck %s -check-prefix X86-64-AVX
+; RUN: llc -mtriple=x86_64 -mattr +avx512f,+evex512 < %s | FileCheck %s -check-prefix X86-64-AVX512
+
+define void @memop_soft_float() "use-soft-float"="true" {
+; X86-32-LABEL: memop_soft_float:
+; X86-32:       # %bb.0:
+; X86-32-NEXT:    movl 28, %eax
+; X86-32-NEXT:    movl %eax, 28
+; X86-32-NEXT:    movl 24, %eax
+; X86-32-NEXT:    movl %eax, 24
+; X86-32-NEXT:    movl 20, %eax
+; X86-32-NEXT:    movl %eax, 20
+; X86-32-NEXT:    movl 16, %eax
+; X86-32-NEXT:    movl %eax, 16
+; X86-32-NEXT:    movl 12, %eax
+; X86-32-NEXT:    movl %eax, 12
+; X86-32-NEXT:    movl 8, %eax
+; X86-32-NEXT:    movl %eax, 8
+; X86-32-NEXT:    movl 0, %eax
+; X86-32-NEXT:    movl 4, %ecx
+; X86-32-NEXT:    movl %ecx, 4
+; X86-32-NEXT:    movl %eax, 0
+; X86-32-NEXT:    movl $0, 28
+; X86-32-NEXT:    movl $0, 24
+; X86-32-NEXT:    movl $0, 20
+; X86-32-NEXT:    movl $0, 16
+; X86-32-NEXT:    movl $0, 12
+; X86-32-NEXT:    movl $0, 8
+; X86-32-NEXT:    movl $0, 4
+; X86-32-NEXT:    movl $0, 0
+; X86-32-NEXT:    retl
+;
+; X86-32-SSE1-LABEL: memop_soft_float:
+; X86-32-SSE1:       # %bb.0:
+; X86-32-SSE1-NEXT:    movl 28, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 28
+; X86-32-SSE1-NEXT:    movl 24, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 24
+; X86-32-SSE1-NEXT:    movl 20, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 20
+; X86-32-SSE1-NEXT:    movl 16, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 16
+; X86-32-SSE1-NEXT:    movl 12, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 12
+; X86-32-SSE1-NEXT:    movl 8, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 8
+; X86-32-SSE1-NEXT:    movl 0, %eax
+; X86-32-SSE1-NEXT:    movl 4, %ecx
+; X86-32-SSE1-NEXT:    movl %ecx, 4
+; X86-32-SSE1-NEXT:    movl %eax, 0
+; X86-32-SSE1-NEXT:    movl $0, 28
+; X86-32-SSE1-NEXT:    movl $0, 24
+; X86-32-SSE1-NEXT:    movl $0, 20
+; X86-32-SSE1-NEXT:    movl $0, 16
+; X86-32-SSE1-NEXT:    movl $0, 12
+; X86-32-SSE1-NEXT:    movl $0, 8
+; X86-32-SSE1-NEXT:    movl $0, 4
+; X86-32-SSE1-NEXT:    movl $0, 0
+; X86-32-SSE1-NEXT:    retl
+;
+; X86-32-SSE2-LABEL: memop_soft_float:
+; X86-32-SSE2:       # %bb.0:
+; X86-32-SSE2-NEXT:    movl 28, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 28
+; X86-32-SSE2-NEXT:    movl 24, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 24
+; X86-32-SSE2-NEXT:    movl 20, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 20
+; X86-32-SSE2-NEXT:    movl 16, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 16
+; X86-32-SSE2-NEXT:    movl 12, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 12
+; X86-32-SSE2-NEXT:    movl 8, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 8
+; X86-32-SSE2-NEXT:    movl 0, %eax
+; X86-32-SSE2-NEXT:    movl 4, %ecx
+; X86-32-SSE2-NEXT:    movl %ecx, 4
+; X86-32-SSE2-NEXT:    movl %eax, 0
+; X86-32-SSE2-NEXT:    movl $0, 28
+; X86-32-SSE2-NEXT:    movl $0, 24
+; X86-32-SSE2-NEXT:    movl $0, 20
+; X86-32-SSE2-NEXT:    movl $0, 16
+; X86-32-SSE2-NEXT:    movl $0, 12
+; X86-32-SSE2-NEXT:    movl $0, 8
+; X86-32-SSE2-NEXT:    movl $0, 4
+; X86-32-SSE2-NEXT:    movl $0, 0
+; X86-32-SSE2-NEXT:    retl
+;
+; X86-32-AVX-LABEL: memop_soft_float:
+; X86-32-AVX:       # %bb.0:
+; X86-32-AVX-NEXT:    movl 28, %eax
+; X86-32-AVX-NEXT:    movl %eax, 28
+; X86-32-AVX-NEXT:    movl 24, %eax
+; X86-32-AVX-NEXT:    movl %eax, 24
+; X86-32-AVX-NEXT:    movl 20, %eax
+; X86-32-AVX-NEXT:    movl %eax, 20
+; X86-32-AVX-NEXT:    movl 16, %eax
+; X86-32-AVX-NEXT:    movl %eax, 16
+; X86-32-AVX-NEXT:    movl 12, %eax
+; X86-32-AVX-NEXT:    movl %eax, 12
+; X86-32-AVX-NEXT:    movl 8, %eax
+; X86-32-AVX-NEXT:    movl %eax, 8
+; X86-32-AVX-NEXT:    movl 0, %eax
+; X86-32-AVX-NEXT:    movl 4, %ecx
+; X86-32-AVX-NEXT:    movl %ecx, 4
+; X86-32-AVX-NEXT:    movl %eax, 0
+; X86-32-AVX-NEXT:    movl $0, 28
+; X86-32-AVX-NEXT:    movl $0, 24
+; X86-32-AVX-NEXT:    movl $0, 20
+; X86-32-AVX-NEXT:    movl $0, 16
+; X86-32-AVX-NEXT:    movl $0, 12
+; X86-32-AVX-NEXT:    movl $0, 8
+; X86-32-AVX-NEXT:    movl $0, 4
+; X86-32-AVX-NEXT:    movl $0, 0
+; X86-32-AVX-NEXT:    retl
+;
+; X86-32-AVX512-LABEL: memop_soft_float:
+; X86-32-AVX512:       # %bb.0:
+; X86-32-AVX512-NEXT:    movl 28, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 28
+; X86-32-AVX512-NEXT:    movl 24, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 24
+; X86-32-AVX512-NEXT:    movl 20, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 20
+; X86-32-AVX512-NEXT:    movl 16, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 16
+; X86-32-AVX512-NEXT:    movl 12, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 12
+; X86-32-AVX512-NEXT:    movl 8, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 8
+; X86-32-AVX512-NEXT:    movl 0, %eax
+; X86-32-AVX512-NEXT:    movl 4, %ecx
+; X86-32-AVX512-NEXT:    movl %ecx, 4
+; X86-32-AVX512-NEXT:    movl %eax, 0
+; X86-32-AVX512-NEXT:    movl $0, 28
+; X86-32-AVX512-NEXT:    movl $0, 24
+; X86-32-AVX512-NEXT:    movl $0, 20
+; X86-32-AVX512-NEXT:    movl $0, 16
+; X86-32-AVX512-NEXT:    movl $0, 12
+; X86-32-AVX512-NEXT:    movl $0, 8
+; X86-32-AVX512-NEXT:    movl $0, 4
+; X86-32-AVX512-NEXT:    movl $0, 0
+; X86-32-AVX512-NEXT:    retl
+;
+; X86-64-LABEL: memop_soft_float:
+; X86-64:       # %bb.0:
+; X86-64-NEXT:    movq 24, %rax
+; X86-64-NEXT:    movq %rax, 24
+; X86-64-NEXT:    movq 16, %rax
+; X86-64-NEXT:    movq %rax, 16
+; X86-64-NEXT:    movq 0, %rax
+; X86-64-NEXT:    movq 8, %rcx
+; X86-64-NEXT:    movq %rcx, 8
+; X86-64-NEXT:    movq %rax, 0
+; X86-64-NEXT:    movq $0, 24
+; X86-64-NEXT:    movq $0, 16
+; X86-64-NEXT:    movq $0, 8
+; X86-64-NEXT:    movq $0, 0
+; X86-64-NEXT:    retq
+;
+; X86-64-SSE1-LABEL: memop_soft_float:
+; X86-64-SSE1:       # %bb.0:
+; X86-64-SSE1-NEXT:    movq 24, %rax
+; X86-64-SSE1-NEXT:    movq %rax, 24
+; X86-64-SSE1-NEXT:    movq 16, %rax
+; X86-64-SSE1-NEXT:    movq %rax, 16
+; X86-64-SSE1-NEXT:    movq 0, %rax
+; X86-64-SSE1-NEXT:    movq 8, %rcx
+; X86-64-SSE1-NEXT:    movq %rcx, 8
+; X86-64-SSE1-NEXT:    movq %rax, 0
+; X86-64-SSE1-NEXT:    movq $0, 24
+; X86-64-SSE1-NEXT:    movq $0, 16
+; X86-64-SSE1-NEXT:    movq $0, 8
+; X86-64-SSE1-NEXT:    movq $0, 0
+; X86-64-SSE1-NEXT:    retq
+;
+; X86-64-SSE2-LABEL: memop_soft_float:
+; X86-64-SSE2:       # %bb.0:
+; X86-64-SSE2-NEXT:    movq 24, %rax
+; X86-64-SSE2-NEXT:    movq %rax, 24
+; X86-64-SSE2-NEXT:    movq 16, %rax
+; X86-64-SSE2-NEXT:    movq %rax, 16
+; X86-64-SSE2-NEXT:    movq 0, %rax
+; X86-64-SSE2-NEXT:    movq 8, %rcx
+; X86-64-SSE2-NEXT:    movq %rcx, 8
+; X86-64-SSE2-NEXT:    movq %rax, 0
+; X86-64-SSE2-NEXT:    movq $0, 24
+; X86-64-SSE2-NEXT:    movq $0, 16
+; X86-64-SSE2-NEXT:    movq $0, 8
+; X86-64-SSE2-NEXT:    movq $0, 0
+; X86-64-SSE2-NEXT:    retq
+;
+; X86-64-AVX-LABEL: memop_soft_float:
+; X86-64-AVX:       # %bb.0:
+; X86-64-AVX-NEXT:    movq 24, %rax
+; X86-64-AVX-NEXT:    movq %rax, 24
+; X86-64-AVX-NEXT:    movq 16, %rax
+; X86-64-AVX-NEXT:    movq %rax, 16
+; X86-64-AVX-NEXT:    movq 0, %rax
+; X86-64-AVX-NEXT:    movq 8, %rcx
+; X86-64-AVX-NEXT:    movq %rcx, 8
+; X86-64-AVX-NEXT:    movq %rax, 0
+; X86-64-AVX-NEXT:    movq $0, 24
+; X86-64-AVX-NEXT:    movq $0, 16
+; X86-64-AVX-NEXT:    movq $0, 8
+; X86-64-AVX-NEXT:    movq $0, 0
+; X86-64-AVX-NEXT:    retq
+;
+; X86-64-AVX512-LABEL: memop_soft_float:
+; X86-64-AVX512:       # %bb.0:
+; X86-64-AVX512-NEXT:    movq 24, %rax
+; X86-64-AVX512-NEXT:    movq %rax, 24
+; X86-64-AVX512-NEXT:    movq 16, %rax
+; X86-64-AVX512-NEXT:    movq %rax, 16
+; X86-64-AVX512-NEXT:    movq 0, %rax
+; X86-64-AVX512-NEXT:    movq 8, %rcx
+; X86-64-AVX512-NEXT:    movq %rcx, 8
+; X86-64-AVX512-NEXT:    movq %rax, 0
+; X86-64-AVX512-NEXT:    movq $0, 24
+; X86-64-AVX512-NEXT:    movq $0, 16
+; X86-64-AVX512-NEXT:    movq $0, 8
+; X86-64-AVX512-NEXT:    movq $0, 0
+; X86-64-AVX512-NEXT:    retq
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+define void @memop_noimplicitfloat() noimplicitfloat {
+; X86-32-LABEL: memop_noimplicitfloat:
+; X86-32:       # %bb.0:
+; X86-32-NEXT:    movl 28, %eax
+; X86-32-NEXT:    movl %eax, 28
+; X86-32-NEXT:    movl 24, %eax
+; X86-32-NEXT:    movl %eax, 24
+; X86-32-NEXT:    movl 20, %eax
+; X86-32-NEXT:    movl %eax, 20
+; X86-32-NEXT:    movl 16, %eax
+; X86-32-NEXT:    movl %eax, 16
+; X86-32-NEXT:    movl 12, %eax
+; X86-32-NEXT:    movl %eax, 12
+; X86-32-NEXT:    movl 8, %eax
+; X86-32-NEXT:    movl %eax, 8
+; X86-32-NEXT:    movl 0, %eax
+; X86-32-NEXT:    movl 4, %ecx
+; X86-32-NEXT:    movl %ecx, 4
+; X86-32-NEXT:    movl %eax, 0
+; X86-32-NEXT:    movl $0, 28
+; X86-32-NEXT:    movl $0, 24
+; X86-32-NEXT:    movl $0, 20
+; X86-32-NEXT:    movl $0, 16
+; X86-32-NEXT:    movl $0, 12
+; X86-32-NEXT:    movl $0, 8
+; X86-32-NEXT:    movl $0, 4
+; X86-32-NEXT:    movl $0, 0
+; X86-32-NEXT:    retl
+;
+; X86-32-SSE1-LABEL: memop_noimplicitfloat:
+; X86-32-SSE1:       # %bb.0:
+; X86-32-SSE1-NEXT:    movl 28, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 28
+; X86-32-SSE1-NEXT:    movl 24, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 24
+; X86-32-SSE1-NEXT:    movl 20, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 20
+; X86-32-SSE1-NEXT:    movl 16, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 16
+; X86-32-SSE1-NEXT:    movl 12, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 12
+; X86-32-SSE1-NEXT:    movl 8, %eax
+; X86-32-SSE1-NEXT:    movl %eax, 8
+; X86-32-SSE1-NEXT:    movl 0, %eax
+; X86-32-SSE1-NEXT:    movl 4, %ecx
+; X86-32-SSE1-NEXT:    movl %ecx, 4
+; X86-32-SSE1-NEXT:    movl %eax, 0
+; X86-32-SSE1-NEXT:    movl $0, 28
+; X86-32-SSE1-NEXT:    movl $0, 24
+; X86-32-SSE1-NEXT:    movl $0, 20
+; X86-32-SSE1-NEXT:    movl $0, 16
+; X86-32-SSE1-NEXT:    movl $0, 12
+; X86-32-SSE1-NEXT:    movl $0, 8
+; X86-32-SSE1-NEXT:    movl $0, 4
+; X86-32-SSE1-NEXT:    movl $0, 0
+; X86-32-SSE1-NEXT:    retl
+;
+; X86-32-SSE2-LABEL: memop_noimplicitfloat:
+; X86-32-SSE2:       # %bb.0:
+; X86-32-SSE2-NEXT:    movl 28, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 28
+; X86-32-SSE2-NEXT:    movl 24, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 24
+; X86-32-SSE2-NEXT:    movl 20, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 20
+; X86-32-SSE2-NEXT:    movl 16, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 16
+; X86-32-SSE2-NEXT:    movl 12, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 12
+; X86-32-SSE2-NEXT:    movl 8, %eax
+; X86-32-SSE2-NEXT:    movl %eax, 8
+; X86-32-SSE2-NEXT:    movl 0, %eax
+; X86-32-SSE2-NEXT:    movl 4, %ecx
+; X86-32-SSE2-NEXT:    movl %ecx, 4
+; X86-32-SSE2-NEXT:    movl %eax, 0
+; X86-32-SSE2-NEXT:    movl $0, 28
+; X86-32-SSE2-NEXT:    movl $0, 24
+; X86-32-SSE2-NEXT:    movl $0, 20
+; X86-32-SSE2-NEXT:    movl $0, 16
+; X86-32-SSE2-NEXT:    movl $0, 12
+; X86-32-SSE2-NEXT:    movl $0, 8
+; X86-32-SSE2-NEXT:    movl $0, 4
+; X86-32-SSE2-NEXT:    movl $0, 0
+; X86-32-SSE2-NEXT:    retl
+;
+; X86-32-AVX-LABEL: memop_noimplicitfloat:
+; X86-32-AVX:       # %bb.0:
+; X86-32-AVX-NEXT:    movl 28, %eax
+; X86-32-AVX-NEXT:    movl %eax, 28
+; X86-32-AVX-NEXT:    movl 24, %eax
+; X86-32-AVX-NEXT:    movl %eax, 24
+; X86-32-AVX-NEXT:    movl 20, %eax
+; X86-32-AVX-NEXT:    movl %eax, 20
+; X86-32-AVX-NEXT:    movl 16, %eax
+; X86-32-AVX-NEXT:    movl %eax, 16
+; X86-32-AVX-NEXT:    movl 12, %eax
+; X86-32-AVX-NEXT:    movl %eax, 12
+; X86-32-AVX-NEXT:    movl 8, %eax
+; X86-32-AVX-NEXT:    movl %eax, 8
+; X86-32-AVX-NEXT:    movl 0, %eax
+; X86-32-AVX-NEXT:    movl 4, %ecx
+; X86-32-AVX-NEXT:    movl %ecx, 4
+; X86-32-AVX-NEXT:    movl %eax, 0
+; X86-32-AVX-NEXT:    movl $0, 28
+; X86-32-AVX-NEXT:    movl $0, 24
+; X86-32-AVX-NEXT:    movl $0, 20
+; X86-32-AVX-NEXT:    movl $0, 16
+; X86-32-AVX-NEXT:    movl $0, 12
+; X86-32-AVX-NEXT:    movl $0, 8
+; X86-32-AVX-NEXT:    movl $0, 4
+; X86-32-AVX-NEXT:    movl $0, 0
+; X86-32-AVX-NEXT:    retl
+;
+; X86-32-AVX512-LABEL: memop_noimplicitfloat:
+; X86-32-AVX512:       # %bb.0:
+; X86-32-AVX512-NEXT:    movl 28, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 28
+; X86-32-AVX512-NEXT:    movl 24, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 24
+; X86-32-AVX512-NEXT:    movl 20, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 20
+; X86-32-AVX512-NEXT:    movl 16, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 16
+; X86-32-AVX512-NEXT:    movl 12, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 12
+; X86-32-AVX512-NEXT:    movl 8, %eax
+; X86-32-AVX512-NEXT:    movl %eax, 8
+; X86-32-AVX512-NEXT:    movl 0, %eax
+; X86-32-AVX512-NEXT:    movl 4, %ecx
+; X86-32-AVX512-NEXT:    movl %ecx, 4
+; X86-32-AVX512-NEXT:    movl %eax, 0
+; X86-32-AVX512-NEXT:    movl $0, 28
+; X86-32-AVX512-NEXT:    movl $0, 24
+; X86-32-AVX512-NEXT:    movl $0, 20
+; X86-32-AVX512-NEXT:    movl $0, 16
+; X86-32-AVX512-NEXT:    movl $0, 12
+; X86-32-AVX512-NEXT:    movl $0, 8
+; X86-32-AVX512-NEXT:    movl $0, 4
+; X86-32-AVX512-NEXT:    movl $0, 0
+; X86-32-AVX512-NEXT:    retl
+;
+; X86-64-LABEL: memop_noimplicitfloat:
+; X86-64:       # %bb.0:
+; X86-64-NEXT:    movq 24, %rax
+; X86-64-NEXT:    movq %rax, 24
+; X86-64-NEXT:    movq 16, %rax
+; X86-64-NEXT:    movq %rax, 16
+; X86-64-NEXT:    movq 0, %rax
+; X86-64-NEXT:    movq 8, %rcx
+; X86-64-NEXT:    movq %rcx, 8
+; X86-64-NEXT:    movq %rax, 0
+; X86-64-NEXT:    movq $0, 24
+; X86-64-NEXT:    movq $0, 16
+; X86-64-NEXT:    movq $0, 8
+; X86-64-NEXT:    movq $0, 0
+; X86-64-NEXT:    retq
+;
+; X86-64-SSE1-LABEL: memop_noimplicitfloat:
+; X86-64-SSE1:       # %bb.0:
+; X86-64-SSE1-NEXT:    movq 24, %rax
+; X86-64-SSE1-NEXT:    movq %rax, 24
+; X86-64-SSE1-NEXT:    movq 16, %rax
+; X86-64-SSE1-NEXT:    movq %rax, 16
+; X86-64-SSE1-NEXT:    movq 0, %rax
+; X86-64-SSE1-NEXT:    movq 8, %rcx
+; X86-64-SSE1-NEXT:    movq %rcx, 8
+; X86-64-SSE1-NEXT:    movq %rax, 0
+; X86-64-SSE1-NEXT:    movq $0, 24
+; X86-64-SSE1-NEXT:    movq $0, 16
+; X86-64-SSE1-NEXT:    movq $0, 8
+; X86-64-SSE1-NEXT:    movq $0, 0
+; X86-64-SSE1-NEXT:    retq
+;
+; X86-64-SSE2-LABEL: memop_noimplicitfloat:
+; X86-64-SSE2:       # %bb.0:
+; X86-64-SSE2-NEXT:    movq 24, %rax
+; X86-64-SSE2-NEXT:    movq %rax, 24
+; X86-64-SSE2-NEXT:    movq 16, %rax
+; X86-64-SSE2-NEXT:    movq %rax, 16
+; X86-64-SSE2-NEXT:    movq 0, %rax
+; X86-64-SSE2-NEXT:    movq 8, %rcx
+; X86-64-SSE2-NEXT:    movq %rcx, 8
+; X86-64-SSE2-NEXT:    movq %rax, 0
+; X86-64-SSE2-NEXT:    movq $0, 24
+; X86-64-SSE2-NEXT:    movq $0, 16
+; X86-64-SSE2-NEXT:    movq $0, 8
+; X86-64-SSE2-NEXT:    movq $0, 0
+; X86-64-SSE2-NEXT:    retq
+;
+; X86-64-AVX-LABEL: memop_noimplicitfloat:
+; X86-64-AVX:       # %bb.0:
+; X86-64-AVX-NEXT:    movq 24, %rax
+; X86-64-AVX-NEXT:    movq %rax, 24
+; X86-64-AVX-NEXT:    movq 16, %rax
+; X86-64-AVX-NEXT:    movq %rax, 16
+; X86-64-AVX-NEXT:    movq 0, %rax
+; X86-64-AVX-NEXT:    movq 8, %rcx
+; X86-64-AVX-NEXT:    movq %rcx, 8
+; X86-64-AVX-NEXT:    movq %rax, 0
+; X86-64-AVX-NEXT:    movq $0, 24
+; X86-64-AVX-NEXT:    movq $0, 16
+; X86-64-AVX-NEXT:    movq $0, 8
+; X86-64-AVX-NEXT:    movq $0, 0
+; X86-64-AVX-NEXT:    retq
+;
+; X86-64-AVX512-LABEL: memop_noimplicitfloat:
+; X86-64-AVX512:       # %bb.0:
+; X86-64-AVX512-NEXT:    movq 24, %rax
+; X86-64-AVX512-NEXT:    movq %rax, 24
+; X86-64-AVX512-NEXT:    movq 16, %rax
+; X86-64-AVX512-NEXT:    movq %rax, 16
+; X86-64-AVX512-NEXT:    movq 0, %rax
+; X86-64-AVX512-NEXT:    movq 8, %rcx
+; X86-64-AVX512-NEXT:    movq %rcx, 8
+; X86-64-AVX512-NEXT:    movq %rax, 0
+; X86-64-AVX512-NEXT:    movq $0, 24
+; X86-64-AVX512-NEXT:    movq $0, 16
+; X86-64-AVX512-NEXT:    movq $0, 8
+; X86-64-AVX512-NEXT:    movq $0, 0
+; X86-64-AVX512-NEXT:    retq
+    call void @llvm.memcpy.p0.p0.i32(ptr null, ptr null, i32 32, i1 true)
+    call void @llvm.memset.p0.i32(ptr null, i8 0, i32 32, i1 true)
+    ret void
+}
+
+declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: readwrite)
+declare void @llvm.memset.p0.i32(ptr nocapture writeonly, i8, i32, i1 immarg) nocallback nofree nounwind willreturn memory(argmem: write)
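
For anyone who wants to reproduce these checks by hand, here is a minimal
sketch of the IR pattern the new tests exercise (the file and function names
below are illustrative only, not part of the patch):

    ; soft-float-demo.ll - hypothetical example, not from the patch.
    ; Tagging the function "use-soft-float"="true" (noimplicitfloat works
    ; the same way) means the memcpy expansion below must be emitted with
    ; integer loads/stores rather than float/vector registers.
    define void @copy32(ptr %dst, ptr %src) "use-soft-float"="true" {
      call void @llvm.memcpy.p0.p0.i32(ptr %dst, ptr %src, i32 32, i1 false)
      ret void
    }

    declare void @llvm.memcpy.p0.p0.i32(ptr noalias nocapture writeonly, ptr noalias nocapture readonly, i32, i1 immarg)

Running, say, llc -mtriple=sparc64 on that file should emit only integer
ldx/stx pairs, matching the SPARC-64 checks above.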


