[llvm] [NVPTX] support packed f32 instructions for sm_100+ (PR #126337)

Princeton Ferro via llvm-commits llvm-commits at lists.llvm.org
Sat Jun 7 02:08:53 PDT 2025


https://github.com/Prince781 updated https://github.com/llvm/llvm-project/pull/126337

>From cdac087503a57f3679af747f4ed22bd9bd4bc6ce Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Sat, 7 Jun 2025 04:59:12 -0400
Subject: [PATCH] [NVPTX] Add f32x2 instructions and register class

---
 llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp   |   20 +-
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp   |  209 +-
 llvm/lib/Target/NVPTX/NVPTXInstrInfo.td       |   34 +-
 llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td    |    4 +-
 llvm/lib/Target/NVPTX/NVPTXSubtarget.h        |    4 +
 llvm/test/CodeGen/NVPTX/aggregate-return.ll   |    7 +-
 llvm/test/CodeGen/NVPTX/bf16-instructions.ll  |  170 +-
 .../test/CodeGen/NVPTX/bf16x2-instructions.ll |  138 +-
 llvm/test/CodeGen/NVPTX/f16x2-instructions.ll |   57 +-
 llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 1892 +++++++++++++++++
 llvm/test/CodeGen/NVPTX/fma-relu-contract.ll  |  120 +-
 .../CodeGen/NVPTX/fma-relu-fma-intrinsic.ll   |  132 +-
 .../NVPTX/fma-relu-instruction-flag.ll        |  222 +-
 llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll  |  139 ++
 llvm/test/CodeGen/NVPTX/i8x4-instructions.ll  |   92 +-
 llvm/test/CodeGen/NVPTX/ldparam-v4.ll         |   10 +-
 llvm/test/CodeGen/NVPTX/math-intrins.ll       |   24 +-
 llvm/test/CodeGen/NVPTX/param-load-store.ll   |   10 +-
 .../CodeGen/NVPTX/reduction-intrinsics.ll     |  564 +++--
 ...unfold-masked-merge-vector-variablemask.ll |  135 +-
 llvm/test/CodeGen/NVPTX/vector-loads.ll       |   48 +-
 21 files changed, 3179 insertions(+), 852 deletions(-)
 create mode 100644 llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
 create mode 100644 llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll

diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 32223bf3d601e..19ddf2a8f500d 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -471,8 +471,13 @@ bool NVPTXDAGToDAGISel::tryEXTRACT_VECTOR_ELEMENT(SDNode *N) {
   // We only care about 16x2 as it's the only real vector type we
   // need to deal with.
   MVT VT = Vector.getSimpleValueType();
-  if (!Isv2x16VT(VT))
-    return false;
+  auto Opcode = NVPTX::I32toV2I16;
+  if (!Isv2x16VT(VT)) {
+    if (VT == MVT::v2f32)
+      Opcode = NVPTX::I64toV2I32;
+    else
+      return false;
+  }
   // Find and record all uses of this vector that extract element 0 or 1.
   SmallVector<SDNode *, 4> E0, E1;
   for (auto *U : Vector.getNode()->users()) {
@@ -496,11 +501,11 @@ bool NVPTXDAGToDAGISel::tryEXTRACT_VECTOR_ELEMENT(SDNode *N) {
   if (E0.empty() || E1.empty())
     return false;
 
-  // Merge (f16 extractelt(V, 0), f16 extractelt(V,1))
-  // into f16,f16 SplitF16x2(V)
+  // Merge (EltTy extractelt(V, 0), EltTy extractelt(V,1))
+  // into EltTy,EltTy Split[EltTy]x2(V)
   MVT EltVT = VT.getVectorElementType();
   SDNode *ScatterOp =
-      CurDAG->getMachineNode(NVPTX::I32toV2I16, SDLoc(N), EltVT, EltVT, Vector);
+      CurDAG->getMachineNode(Opcode, SDLoc(N), EltVT, EltVT, Vector);
   for (auto *Node : E0)
     ReplaceUses(SDValue(Node, 0), SDValue(ScatterOp, 0));
   for (auto *Node : E1)
@@ -1035,6 +1040,7 @@ pickOpcodeForVT(MVT::SimpleValueType VT, std::optional<unsigned> Opcode_i8,
   case MVT::i32:
   case MVT::f32:
     return Opcode_i32;
+  case MVT::v2f32:
   case MVT::i64:
   case MVT::f64:
     return Opcode_i64;
@@ -1245,7 +1251,9 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
     EltVT = EltVT.getVectorElementType();
     // vectors of 8/16bits type are loaded/stored as multiples of v4i8/v2x16
     // elements.
-    if ((EltVT == MVT::f16 && OrigType == MVT::v2f16) ||
+    // Packed vector types are loaded/stored in a single register.
+    if ((EltVT == MVT::f32 && OrigType == MVT::v2f32) ||
+        (EltVT == MVT::f16 && OrigType == MVT::v2f16) ||
         (EltVT == MVT::bf16 && OrigType == MVT::v2bf16) ||
         (EltVT == MVT::i16 && OrigType == MVT::v2i16) ||
         (EltVT == MVT::i8 && OrigType == MVT::v4i8)) {
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index d6a134d9abafd..3a4cccc12435d 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -186,9 +186,9 @@ static bool IsPTXVectorType(MVT VT) {
   }
 }
 
-static bool Is16bitsType(MVT VT) {
+static bool isPackedElementVT(MVT VT) {
   return (VT.SimpleTy == MVT::f16 || VT.SimpleTy == MVT::bf16 ||
-          VT.SimpleTy == MVT::i16);
+          VT.SimpleTy == MVT::i16 || VT.SimpleTy == MVT::f32);
 }
 
 // When legalizing vector loads/stores, this function is called, which does two
@@ -321,11 +321,8 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
   }
 
   ComputeValueVTs(TLI, DL, Ty, TempVTs, &TempOffsets, StartingOffset);
-  for (unsigned i = 0, e = TempVTs.size(); i != e; ++i) {
-    EVT VT = TempVTs[i];
-    uint64_t Off = TempOffsets[i];
-    // Split vectors into individual elements, except for v2f16, which
-    // we will pass as a single scalar.
+  for (auto [VT, Off] : zip(TempVTs, TempOffsets)) {
+    // Split vectors into individual elements, except for packed types
     if (VT.isVector()) {
       unsigned NumElts = VT.getVectorNumElements();
       EVT EltVT = VT.getVectorElementType();
@@ -333,10 +330,10 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
       // TargetLoweringBase::getVectorTypeBreakdown() which is invoked in
       // ComputePTXValueVTs() cannot currently break down non-power-of-2 sized
       // vectors.
-      if ((Is16bitsType(EltVT.getSimpleVT())) && NumElts % 2 == 0 &&
+      if (isPackedElementVT(EltVT.getSimpleVT()) && NumElts > 1 &&
           isPowerOf2_32(NumElts)) {
-        // Vectors with an even number of f16 elements will be passed to
-        // us as an array of v2f16/v2bf16 elements. We must match this so we
+        // Vectors with a power-of-2 number of (2 or more) elements will be
+        // passed to us as an array of 2-element packed vectors; match this so we
         // stay in sync with Ins/Outs.
         switch (EltVT.getSimpleVT().SimpleTy) {
         case MVT::f16:
@@ -348,6 +345,9 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
         case MVT::i16:
           EltVT = MVT::v2i16;
           break;
+        case MVT::f32:
+          EltVT = MVT::v2f32;
+          break;
         default:
           llvm_unreachable("Unexpected type");
         }
@@ -592,6 +592,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   addRegisterClass(MVT::v2f16, &NVPTX::Int32RegsRegClass);
   addRegisterClass(MVT::bf16, &NVPTX::Int16RegsRegClass);
   addRegisterClass(MVT::v2bf16, &NVPTX::Int32RegsRegClass);
+  addRegisterClass(MVT::v2f32, &NVPTX::Int64RegsRegClass);
 
   // Conversion to/from FP16/FP16x2 is always legal.
   setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
@@ -628,6 +629,10 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
 
+  // No support for these operations with v2f32.
+  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Expand);
+  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Expand);
+
   // Custom conversions to/from v2i8.
   setOperationAction(ISD::BITCAST, MVT::v2i8, Custom);
 
@@ -653,12 +658,16 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
 
   // Operations not directly supported by NVPTX.
   for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
-                 MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, MVT::v4i8,
-                 MVT::i32, MVT::i64}) {
+                 MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
+                 MVT::v4i8, MVT::i32, MVT::i64}) {
     setOperationAction(ISD::SELECT_CC, VT, Expand);
     setOperationAction(ISD::BR_CC, VT, Expand);
   }
 
+  // Not directly supported. TLI would attempt to expand operations like
+  // FMINIMUM(v2f32) using invalid SETCC and VSELECT nodes.
+  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
+
   // Some SIGN_EXTEND_INREG can be done using cvt instruction.
   // For others we will expand to a SHL/SRA pair.
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i64, Legal);
@@ -843,6 +852,11 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   if (STI.allowFP16Math() || STI.hasBF16Math())
     setTargetDAGCombine(ISD::SETCC);
 
+  // Combine packed reduction operations (e.g. fadd.f32x2) with vector shuffles
+  // when one of their lanes is a no-op.
+  if (STI.allowFP16Math() || STI.hasBF16Math() || STI.hasF32x2Instructions())
+    setTargetDAGCombine({ISD::FADD, ISD::FMUL, ISD::FMINIMUM, ISD::FMAXIMUM});
+
   // Promote fp16 arithmetic if fp16 hardware isn't available or the
   // user passed --nvptx-no-fp16-math. The flag is useful because,
   // although sm_53+ GPUs have some sort of FP16 support in
@@ -857,6 +871,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
     setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
     if (getOperationAction(Op, MVT::bf16) == Promote)
       AddPromotedToType(Op, MVT::bf16, MVT::f32);
+    setOperationAction(Op, MVT::v2f32,
+                       STI.hasF32x2Instructions() ? Legal : Expand);
   }
 
   // On SM80, we select add/mul/sub as fma to avoid promotion to float
@@ -878,6 +894,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
 
   setBF16OperationAction(ISD::FNEG, MVT::bf16, Legal, Expand);
   setBF16OperationAction(ISD::FNEG, MVT::v2bf16, Legal, Expand);
+  setOperationAction(ISD::FNEG, MVT::v2f32, Expand);
   // (would be) Library functions.
 
   // These map to conversion instructions for scalar FP types.
@@ -888,6 +905,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
     setOperationAction(Op, MVT::f64, Legal);
     setOperationAction(Op, MVT::v2f16, Expand);
     setOperationAction(Op, MVT::v2bf16, Expand);
+    setOperationAction(Op, MVT::v2f32, Expand);
     setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
     if (getOperationAction(Op, MVT::bf16) == Promote)
       AddPromotedToType(Op, MVT::bf16, MVT::f32);
@@ -903,6 +921,11 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
     }
   }
 
+  // Expand v2f32 = fp_extend
+  setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Expand);
+  // Expand v2[b]f16 = fp_round v2f32
+  setOperationAction(ISD::FP_ROUND, {MVT::v2bf16, MVT::v2f16}, Expand);
+
   // sm_80 only has conversions between f32 and bf16. Custom lower all other
   // bf16 conversions.
   if (STI.getSmVersion() < 90 || STI.getPTXVersion() < 78) {
@@ -942,12 +965,14 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
     setOperationAction(Op, MVT::f64, Legal);
     setOperationAction(Op, MVT::v2f16, Expand);
     setOperationAction(Op, MVT::v2bf16, Expand);
+    setOperationAction(Op, MVT::v2f32, Expand);
     setOperationAction(Op, MVT::bf16, Promote);
     AddPromotedToType(Op, MVT::bf16, MVT::f32);
   }
   setOperationAction(ISD::FREM, {MVT::f32, MVT::f64}, Custom);
 
   setOperationAction(ISD::FABS, {MVT::f32, MVT::f64}, Legal);
+  setOperationAction(ISD::FABS, MVT::v2f32, Expand);
   if (STI.getPTXVersion() >= 65) {
     setFP16OperationAction(ISD::FABS, MVT::f16, Legal, Promote);
     setFP16OperationAction(ISD::FABS, MVT::v2f16, Legal, Expand);
@@ -969,6 +994,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
     setBF16OperationAction(Op, MVT::bf16, Legal, Promote);
     if (getOperationAction(Op, MVT::bf16) == Promote)
       AddPromotedToType(Op, MVT::bf16, MVT::f32);
+    setOperationAction(Op, MVT::v2f32, Expand);
   }
   bool SupportsF32MinMaxNaN =
       STI.getSmVersion() >= 80 && STI.getPTXVersion() >= 70;
@@ -978,6 +1004,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
     setFP16OperationAction(Op, MVT::v2f16, Legal, Expand);
     setBF16OperationAction(Op, MVT::bf16, Legal, Expand);
     setBF16OperationAction(Op, MVT::v2bf16, Legal, Expand);
+    setOperationAction(Op, MVT::v2f32, Expand);
   }
 
   // Custom lowering for inline asm with 128-bit operands
@@ -990,6 +1017,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   // - bf16/bf16x2 (sm_90+, PTX 7.8+)
   // When f16/bf16 types aren't supported, they are promoted/expanded to f32.
   setOperationAction(ISD::FEXP2, MVT::f32, Legal);
+  setOperationAction(ISD::FEXP2, MVT::v2f32, Expand);
   setFP16OperationAction(ISD::FEXP2, MVT::f16, Legal, Promote);
   setFP16OperationAction(ISD::FEXP2, MVT::v2f16, Legal, Expand);
   setBF16OperationAction(ISD::FEXP2, MVT::bf16, Legal, Promote);
@@ -1001,7 +1029,8 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
     setOperationAction(ISD::FLOG2, MVT::f32, Legal);
     setOperationPromotedToType(ISD::FLOG2, MVT::f16, MVT::f32);
     setOperationPromotedToType(ISD::FLOG2, MVT::bf16, MVT::f32);
-    setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16}, Expand);
+    setOperationAction(ISD::FLOG2, {MVT::v2f16, MVT::v2bf16, MVT::v2f32},
+                       Expand);
   }
 
   setOperationAction(ISD::ADDRSPACECAST, {MVT::i32, MVT::i64}, Custom);
@@ -3126,22 +3155,38 @@ SDValue NVPTXTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
                       MachinePointerInfo(SV));
 }
 
+static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
+                              SmallVectorImpl<SDValue> &Results,
+                              const NVPTXSubtarget &STI);
+
 SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   if (Op.getValueType() == MVT::i1)
     return LowerLOADi1(Op, DAG);
 
-  // v2f16/v2bf16/v2i16/v4i8 are legal, so we can't rely on legalizer to handle
-  // unaligned loads and have to handle it here.
   EVT VT = Op.getValueType();
-  if (Isv2x16VT(VT) || VT == MVT::v4i8) {
-    LoadSDNode *Load = cast<LoadSDNode>(Op);
-    EVT MemVT = Load->getMemoryVT();
-    if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
-                                        MemVT, *Load->getMemOperand())) {
-      SDValue Ops[2];
-      std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
-      return DAG.getMergeValues(Ops, SDLoc(Op));
-    }
+  if (!(Isv2x16VT(VT) || VT == MVT::v4i8 || VT == MVT::v2f32))
+    return SDValue();
+
+  // v2f32/v2f16/v2bf16/v2i16/v4i8 are legal, so we can't rely on legalizer to
+  // handle unaligned loads and have to handle it here.
+  LoadSDNode *Load = cast<LoadSDNode>(Op);
+  EVT MemVT = Load->getMemoryVT();
+  if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                      MemVT, *Load->getMemOperand())) {
+    SDValue Ops[2];
+    std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
+    return DAG.getMergeValues(Ops, SDLoc(Op));
+  }
+
+  if (VT == MVT::v2f32) {
+    // If we have instructions accessing f32 elements, it's better to not pack
+    // them. Lower the load now as `f32,f32,ch = LoadV2` (ld.v2.f32), rather
+    // than waiting until ISel when it'll be lowered as ld.b64.
+    SmallVector<SDValue, 2> Results;
+    ReplaceLoadVector(Op.getNode(), DAG, Results, STI);
+    if (!Results.empty())
+      // if we succeeded, return it
+      return DAG.getMergeValues(Results, SDLoc(Op));
   }
 
   return SDValue();
@@ -3179,12 +3224,12 @@ SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
 
   // v2f16 is legal, so we can't rely on legalizer to handle unaligned
   // stores and have to handle it here.
-  if ((Isv2x16VT(VT) || VT == MVT::v4i8) &&
+  if ((Isv2x16VT(VT) || VT == MVT::v2f32 || VT == MVT::v4i8) &&
       !allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                       VT, *Store->getMemOperand()))
     return expandUnalignedStore(Store, DAG);
 
-  // v2f16, v2bf16 and v2i16 don't need special handling.
+  // v2f16/v2bf16/v2i16 don't need special handling.
   if (Isv2x16VT(VT) || VT == MVT::v4i8)
     return SDValue();
 
@@ -3454,19 +3499,15 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
       unsigned I = 0;
       for (const unsigned NumElts : VectorInfo) {
         const EVT EltVT = VTs[I];
-        const EVT LoadVT = [&]() -> EVT {
-          // i1 is loaded/stored as i8.
-          if (EltVT == MVT::i1)
-            return MVT::i8;
-          // getLoad needs a vector type, but it can't handle
-          // vectors which contain v2f16 or v2bf16 elements. So we must load
-          // using i32 here and then bitcast back.
-          if (EltVT.isVector())
-            return MVT::getIntegerVT(EltVT.getFixedSizeInBits());
-          return EltVT;
-        }();
-
-        const EVT VecVT = EVT::getVectorVT(F->getContext(), LoadVT, NumElts);
+        // i1 is loaded/stored as i8
+        const EVT LoadVT = EltVT == MVT::i1 ? MVT::i8 : EltVT;
+        // If the element is v2f16/v2bf16/v2f32/etc, then it really holds 2
+        // packed elements of type f16/bf16/f32.
+        const unsigned PackingAmt =
+            LoadVT.isVector() ? LoadVT.getVectorNumElements() : 1;
+
+        const EVT VecVT = EVT::getVectorVT(
+            F->getContext(), LoadVT.getScalarType(), NumElts * PackingAmt);
         SDValue VecAddr = DAG.getObjectPtrOffset(
             dl, ArgSymbol, TypeSize::getFixed(Offsets[I]));
 
@@ -3486,8 +3527,10 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
         if (P.getNode())
           P.getNode()->setIROrder(Arg.getArgNo() + 1);
         for (const unsigned J : llvm::seq(NumElts)) {
-          SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, LoadVT, P,
-                                    DAG.getIntPtrConstant(J, dl));
+          SDValue Elt = DAG.getNode(LoadVT.isVector() ? ISD::EXTRACT_SUBVECTOR
+                                                      : ISD::EXTRACT_VECTOR_ELT,
+                                    dl, LoadVT, P,
+                                    DAG.getIntPtrConstant(J * PackingAmt, dl));
 
           // Extend or truncate the element if necessary (e.g. an i8 is loaded
           // into an i16 register)
@@ -3501,9 +3544,6 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
                               Elt);
           } else if (ExpactedVT.bitsLT(Elt.getValueType())) {
             Elt = DAG.getNode(ISD::TRUNCATE, dl, ExpactedVT, Elt);
-          } else {
-            // v2f16 was loaded as an i32. Now we must bitcast it back.
-            Elt = DAG.getBitcast(EltVT, Elt);
           }
           InVals.push_back(Elt);
         }
@@ -3595,6 +3635,9 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
   const auto VectorInfo = VectorizePTXValueVTs(VTs, Offsets, RetAlign);
   unsigned I = 0;
   for (const unsigned NumElts : VectorInfo) {
+    // Number of scalar values packed into each element of VTs[I]; only v2f32
+    // elements are treated as packed here.
+    const unsigned PackingAmt = VTs[I] == MVT::v2f32 ? 2 : 1;
+
     const Align CurrentAlign = commonAlignment(RetAlign, Offsets[I]);
     if (NumElts == 1 && RetTy->isAggregateType() &&
         CurrentAlign < DAG.getEVTAlign(VTs[I])) {
@@ -3610,11 +3653,16 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
     SmallVector<SDValue, 6> StoreOperands{
         Chain, DAG.getConstant(Offsets[I], dl, MVT::i32)};
 
-    for (const unsigned J : llvm::seq(NumElts))
-      StoreOperands.push_back(GetRetVal(I + J));
+    for (const unsigned J : llvm::seq(NumElts)) {
+      SDValue Operand = GetRetVal(I + J);
+      if (PackingAmt > 1)
+        DAG.ExtractVectorElements(Operand, StoreOperands);
+      else
+        StoreOperands.push_back(Operand);
+    }
 
     NVPTXISD::NodeType Op;
-    switch (NumElts) {
+    switch (NumElts * PackingAmt) {
     case 1:
       Op = NVPTXISD::StoreRetval;
       break;
@@ -3631,6 +3679,8 @@ NVPTXTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
     // Adjust type of load/store op if we've extended the scalar
     // return value.
     EVT TheStoreType = ExtendIntegerRetVal ? MVT::i32 : VTs[I];
+    if (PackingAmt > 1)
+      TheStoreType = TheStoreType.getScalarType();
     Chain = DAG.getMemIntrinsicNode(
         Op, dl, DAG.getVTList(MVT::Other), StoreOperands, TheStoreType,
         MachinePointerInfo(), CurrentAlign, MachineMemOperand::MOStore);
@@ -5083,6 +5133,9 @@ static SDValue PerformADDCombine(SDNode *N,
   return PerformADDCombineWithOperands(N, N1, N0, DCI);
 }
 
+static SDValue PerformPackedFOpCombine(SDNode *N,
+                                       TargetLowering::DAGCombinerInfo &DCI);
+
 /// PerformFADDCombine - Target-specific dag combine xforms for ISD::FADD.
 ///
 static SDValue PerformFADDCombine(SDNode *N,
@@ -5092,6 +5145,8 @@ static SDValue PerformFADDCombine(SDNode *N,
   SDValue N1 = N->getOperand(1);
 
   EVT VT = N0.getValueType();
+  if (VT.isVector())
+    return PerformPackedFOpCombine(N, DCI);
   if (VT.isVector() || !(VT == MVT::f32 || VT == MVT::f64))
     return SDValue();
 
@@ -5103,6 +5158,54 @@ static SDValue PerformFADDCombine(SDNode *N,
   return PerformFADDCombineWithOperands(N, N1, N0, DCI, OptLevel);
 }
 
+// For vector reductions that are unrolled into packed ops (e.g. fadd.f32x2),
+// the final reduction op needs to produce a scalar. By default, this results in
+// a final packed op where one of the lanes is undef:
+//
+// v1: v2f32 = fadd reassoc a, b
+// ...
+// v[N-2]: v2f32 = fadd reassoc v[N-4], v[N-3]
+//
+// # now we combine v[N-2]:0 with v[N-2]:1
+//
+// v[N-1]: v2f32 = vector_shuffle<1,u> v[N-2], undef:v2f32
+// vN: v2f32 = fadd reassoc v[N-1], v[N-2]
+//
+// result: f32 = extractelt vN, 0
+//
+// We convert this to a scalar op.
+static SDValue PerformPackedFOpCombine(SDNode *N,
+                                       TargetLowering::DAGCombinerInfo &DCI) {
+  // Convert (fop.x2 (vector_shuffle<1,u> V), V) -> ((fop V:1, V:0), undef)
+  const EVT VectorVT = N->getValueType(0);
+  if (!(VectorVT == MVT::v2f32 || Isv2x16VT(VectorVT)))
+    return SDValue();
+
+  SDValue VecOp0 = N->getOperand(0);
+  SDValue VecOp1 = N->getOperand(1);
+
+  // canonicalize shuffle to op0
+  if (VecOp1.getOpcode() == ISD::VECTOR_SHUFFLE)
+    std::swap(VecOp0, VecOp1);
+
+  if (VecOp0.getOpcode() != ISD::VECTOR_SHUFFLE)
+    return SDValue();
+
+  auto *ShuffleOp = cast<ShuffleVectorSDNode>(VecOp0);
+  // check for dead lane
+  if (!(ShuffleOp->getMaskElt(0) == 1 && ShuffleOp->getMaskElt(1) == -1))
+    return SDValue();
+
+  SDLoc DL(N);
+  const EVT ScalarVT = VectorVT.getScalarType();
+  SDValue Lane0 = DCI.DAG.getNode(
+      N->getOpcode(), DL, ScalarVT,
+      DCI.DAG.getExtractVectorElt(DL, ScalarVT, VecOp0.getOperand(0), 1),
+      DCI.DAG.getExtractVectorElt(DL, ScalarVT, VecOp1, 0));
+  SDValue Lane1 = DCI.DAG.getUNDEF(ScalarVT);
+  return DCI.DAG.getBuildVector(VectorVT, DL, {Lane0, Lane1});
+}
+
 static SDValue PerformANDCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {
   // The type legalizer turns a vector load of i8 values into a zextload to i16
@@ -5519,10 +5622,10 @@ static SDValue PerformEXTRACTCombine(SDNode *N,
       IsPTXVectorType(VectorVT.getSimpleVT()))
     return SDValue(); // Native vector loads already combine nicely w/
                       // extract_vector_elt.
-  // Don't mess with singletons or v2*16, v4i8 and v8i8 types, we already
-  // handle them OK.
-  if (VectorVT.getVectorNumElements() == 1 || Isv2x16VT(VectorVT) ||
-      VectorVT == MVT::v4i8 || VectorVT == MVT::v8i8)
+  // Don't mess with singletons or packed types (v2f32, v2*16, v4i8 and v8i8),
+  // we already handle them OK.
+  if (VectorVT.getVectorNumElements() == 1 || VectorVT == MVT::v2f32 ||
+      Isv2x16VT(VectorVT) || VectorVT == MVT::v4i8 || VectorVT == MVT::v8i8)
     return SDValue();
 
   // Don't mess with undef values as sra may be simplified to 0, not undef.
@@ -5676,6 +5779,10 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
       return PerformADDCombine(N, DCI, OptLevel);
     case ISD::FADD:
       return PerformFADDCombine(N, DCI, OptLevel);
+    case ISD::FMUL:
+    case ISD::FMINIMUM:
+    case ISD::FMAXIMUM:
+      return PerformPackedFOpCombine(N, DCI);
     case ISD::MUL:
       return PerformMULCombine(N, DCI, OptLevel);
     case ISD::SHL:
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index b646d39194c7e..2cf7c1008ec22 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -158,6 +158,7 @@ def hasHWROT32 : Predicate<"Subtarget->hasHWROT32()">;
 def noHWROT32 : Predicate<"!Subtarget->hasHWROT32()">;
 def hasDotInstructions : Predicate<"Subtarget->hasDotInstructions()">;
 def hasTcgen05Instructions : Predicate<"Subtarget->hasTcgen05Instructions()">;
+def hasF32x2Instructions : Predicate<"Subtarget->hasF32x2Instructions()">;
 
 def True : Predicate<"true">;
 def False : Predicate<"false">;
@@ -193,6 +194,7 @@ class ValueToRegClass<ValueType T> {
      !eq(name, "bf16"): Int16Regs,
      !eq(name, "v2bf16"): Int32Regs,
      !eq(name, "f32"): Float32Regs,
+     !eq(name, "v2f32"): Int64Regs,
      !eq(name, "f64"): Float64Regs,
      !eq(name, "ai32"): Int32ArgRegs,
      !eq(name, "ai64"): Int64ArgRegs,
@@ -239,6 +241,7 @@ def BF16RT   : RegTyInfo<bf16, Int16Regs, bf16imm, fpimm, supports_imm = 0>;
 
 def F16X2RT  : RegTyInfo<v2f16, Int32Regs, ?, ?, supports_imm = 0>;
 def BF16X2RT : RegTyInfo<v2bf16, Int32Regs, ?, ?, supports_imm = 0>;
+def F32X2RT  : RegTyInfo<v2f32, Int64Regs, ?, ?, supports_imm = 0>;
 
 
 // This class provides a basic wrapper around an NVPTXInst that abstracts the
@@ -461,6 +464,18 @@ multiclass F3<string op_str, SDPatternOperator op_pat> {
               [(set f16:$dst, (op_pat f16:$a, f16:$b))]>,
               Requires<[useFP16Math]>;
 
+  def f32x2rr_ftz :
+    BasicNVPTXInst<(outs Int64Regs:$dst),
+              (ins Int64Regs:$a, Int64Regs:$b),
+              op_str # ".ftz.f32x2",
+              [(set v2f32:$dst, (op_pat v2f32:$a, v2f32:$b))]>,
+              Requires<[hasF32x2Instructions, doF32FTZ]>;
+  def f32x2rr :
+    BasicNVPTXInst<(outs Int64Regs:$dst),
+              (ins Int64Regs:$a, Int64Regs:$b),
+              op_str # ".f32x2",
+              [(set v2f32:$dst, (op_pat v2f32:$a, v2f32:$b))]>,
+              Requires<[hasF32x2Instructions]>;
   def f16x2rr_ftz :
     BasicNVPTXInst<(outs Int32Regs:$dst),
               (ins Int32Regs:$a, Int32Regs:$b),
@@ -839,6 +854,9 @@ def : Pat<(vt (select i1:$p, vt:$a, vt:$b)),
           (SELP_b32rr $a, $b, $p)>;
 }
 
+def : Pat<(v2f32 (select i1:$p, v2f32:$a, v2f32:$b)),
+          (SELP_b64rr $a, $b, $p)>;
+
 //-----------------------------------
 // Test Instructions
 //-----------------------------------
@@ -1387,6 +1405,8 @@ defm BFMA16       : FMA<"fma.rn.bf16", BF16RT, [hasBF16Math]>;
 defm BFMA16x2     : FMA<"fma.rn.bf16x2", BF16X2RT, [hasBF16Math]>;
 defm FMA32_ftz    : FMA<"fma.rn.ftz.f32", F32RT, [doF32FTZ]>;
 defm FMA32        : FMA<"fma.rn.f32", F32RT>;
+defm FMA32x2_ftz  : FMA<"fma.rn.ftz.f32x2", F32X2RT, [hasF32x2Instructions, doF32FTZ]>;
+defm FMA32x2      : FMA<"fma.rn.f32x2", F32X2RT, [hasF32x2Instructions]>;
 defm FMA64        : FMA<"fma.rn.f64", F64RT>;
 
 // sin/cos
@@ -2739,6 +2759,7 @@ def : Pat<(i32 (trunc (sra i64:$s, (i32 32)))), (I64toI32H $s)>;
 def: Pat<(i32 (sext (extractelt v2i16:$src, 0))),
          (CVT_INREG_s32_s16 $src)>;
 
+// Handle extracting one element from the pair (32-bit types)
 foreach vt = [v2f16, v2bf16, v2i16] in {
   def : Pat<(extractelt vt:$src, 0), (I32toI16L_Sink $src)>, Requires<[hasPTX<71>]>;
   def : Pat<(extractelt vt:$src, 1), (I32toI16H_Sink $src)>, Requires<[hasPTX<71>]>;
@@ -2750,10 +2771,21 @@ foreach vt = [v2f16, v2bf16, v2i16] in {
             (V2I16toI32 $a, $b)>;
 }
 
+// Same thing for the 64-bit type v2f32.
+foreach vt = [v2f32] in {
+  def : Pat<(extractelt vt:$src, 0), (I64toI32L_Sink $src)>, Requires<[hasPTX<71>]>;
+  def : Pat<(extractelt vt:$src, 1), (I64toI32H_Sink $src)>, Requires<[hasPTX<71>]>;
+
+  def : Pat<(extractelt vt:$src, 0), (I64toI32L $src)>;
+  def : Pat<(extractelt vt:$src, 1), (I64toI32H $src)>;
+
+  def : Pat<(vt (build_vector vt.ElementType:$a, vt.ElementType:$b)),
+            (V2I32toI64 $a, $b)>;
+}
+
 def: Pat<(v2i16 (scalar_to_vector i16:$a)),
          (CVT_u32_u16 $a, CvtNONE)>;
 
-
 def nvptx_build_vector : SDNode<"NVPTXISD::BUILD_VECTOR", SDTypeProfile<1, 2, []>, []>;
 
 def : Pat<(i64 (nvptx_build_vector i32:$a, i32:$b)),
diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
index 2eea9e9721cdf..0d364be29b9ec 100644
--- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -60,7 +60,9 @@ def Int16Regs : NVPTXRegClass<[i16, f16, bf16], 16, (add (sequence "RS%u", 0, 4)
 def Int32Regs : NVPTXRegClass<[i32, v2f16, v2bf16, v2i16, v4i8, f32], 32,
                               (add (sequence "R%u", 0, 4),
                               VRFrame32, VRFrameLocal32)>;
-def Int64Regs : NVPTXRegClass<[i64, f64], 64, (add (sequence "RL%u", 0, 4), VRFrame64, VRFrameLocal64)>;
+def Int64Regs : NVPTXRegClass<[i64, v2f32, f64], 64,
+                              (add (sequence "RL%u", 0, 4),
+                              VRFrame64, VRFrameLocal64)>;
 // 128-bit regs are not defined as general regs in NVPTX. They are used for inlineASM only.
 def Int128Regs : NVPTXRegClass<[i128], 128, (add (sequence "RQ%u", 0, 4))>;
 
diff --git a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
index 5136b1ee28502..6bd52285126cb 100644
--- a/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
+++ b/llvm/lib/Target/NVPTX/NVPTXSubtarget.h
@@ -116,6 +116,10 @@ class NVPTXSubtarget : public NVPTXGenSubtargetInfo {
 
     return HasTcgen05 && PTXVersion >= 86;
   }
+  // f32x2 instructions in Blackwell family
+  bool hasF32x2Instructions() const {
+    return SmVersion >= 100 && PTXVersion >= 86;
+  }
 
   // Prior to CUDA 12.3 ptxas did not recognize that the trap instruction
   // terminates a basic block. Instead, it would assume that control flow
diff --git a/llvm/test/CodeGen/NVPTX/aggregate-return.ll b/llvm/test/CodeGen/NVPTX/aggregate-return.ll
index 1c8f019922e37..f17cb5bf49693 100644
--- a/llvm/test/CodeGen/NVPTX/aggregate-return.ll
+++ b/llvm/test/CodeGen/NVPTX/aggregate-return.ll
@@ -10,7 +10,8 @@ define void @test_v2f32(<2 x float> %input, ptr %output) {
 ; CHECK-LABEL: @test_v2f32
   %call = tail call <2 x float> @barv(<2 x float> %input)
 ; CHECK: .param .align 8 .b8 retval0[8];
-; CHECK: ld.param.v2.b32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [retval0];
+; CHECK: ld.param.b64 [[E0_1:%rd[0-9]+]], [retval0];
+; CHECK: mov.b64 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [[E0_1]]
   store <2 x float> %call, ptr %output, align 8
 ; CHECK: st.v2.b32 [{{%rd[0-9]+}}], {[[E0]], [[E1]]}
   ret void
@@ -27,9 +28,7 @@ define void @test_v3f32(<3 x float> %input, ptr %output) {
 ; CHECK-NOT: ld.param.b32 [[E3:%r[0-9]+]], [retval0+12];
   store <3 x float> %call, ptr %output, align 8
 ; CHECK-DAG: st.b32 [{{%rd[0-9]}}+8],
-; -- This is suboptimal. We should do st.v2.f32 instead
-;    of combining 2xf32 info i64.
-; CHECK-DAG: st.b64 [{{%rd[0-9]}}],
+; CHECK-DAG: st.v2.b32 [{{%rd[0-9]}}],
 ; CHECK: ret;
   ret void
 }
diff --git a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
index 32225ed04e2d9..38c7466705bb2 100644
--- a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
@@ -184,10 +184,10 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    .reg .b32 %r<5>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
-; SM80-NEXT:    ld.param.b32 %r1, [test_faddx2_param_1];
-; SM80-NEXT:    ld.param.b32 %r2, [test_faddx2_param_0];
+; SM80-NEXT:    ld.param.b32 %r1, [test_faddx2_param_0];
+; SM80-NEXT:    ld.param.b32 %r2, [test_faddx2_param_1];
 ; SM80-NEXT:    mov.b32 %r3, 1065369472;
-; SM80-NEXT:    fma.rn.bf16x2 %r4, %r2, %r3, %r1;
+; SM80-NEXT:    fma.rn.bf16x2 %r4, %r1, %r3, %r2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r4;
 ; SM80-NEXT:    ret;
 ;
@@ -216,9 +216,9 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM90-NEXT:    .reg .b32 %r<4>;
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b32 %r1, [test_faddx2_param_1];
-; SM90-NEXT:    ld.param.b32 %r2, [test_faddx2_param_0];
-; SM90-NEXT:    add.rn.bf16x2 %r3, %r2, %r1;
+; SM90-NEXT:    ld.param.b32 %r1, [test_faddx2_param_0];
+; SM90-NEXT:    ld.param.b32 %r2, [test_faddx2_param_1];
+; SM90-NEXT:    add.rn.bf16x2 %r3, %r1, %r2;
 ; SM90-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM90-NEXT:    ret;
   %r = fadd <2 x bfloat> %a, %b
@@ -269,9 +269,9 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.b32 %r1, [test_fsubx2_param_0];
-; SM80-NEXT:    ld.param.b32 %r2, [test_fsubx2_param_1];
-; SM80-NEXT:    mov.b32 %r3, -1082081408;
-; SM80-NEXT:    fma.rn.bf16x2 %r4, %r2, %r3, %r1;
+; SM80-NEXT:    mov.b32 %r2, -1082081408;
+; SM80-NEXT:    ld.param.b32 %r3, [test_fsubx2_param_1];
+; SM80-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r4;
 ; SM80-NEXT:    ret;
 ;
@@ -300,9 +300,9 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM90-NEXT:    .reg .b32 %r<4>;
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b32 %r1, [test_fsubx2_param_1];
-; SM90-NEXT:    ld.param.b32 %r2, [test_fsubx2_param_0];
-; SM90-NEXT:    sub.rn.bf16x2 %r3, %r2, %r1;
+; SM90-NEXT:    ld.param.b32 %r1, [test_fsubx2_param_0];
+; SM90-NEXT:    ld.param.b32 %r2, [test_fsubx2_param_1];
+; SM90-NEXT:    sub.rn.bf16x2 %r3, %r1, %r2;
 ; SM90-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM90-NEXT:    ret;
   %r = fsub <2 x bfloat> %a, %b
@@ -352,10 +352,10 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    .reg .b32 %r<5>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
-; SM80-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_1];
-; SM80-NEXT:    ld.param.b32 %r2, [test_fmulx2_param_0];
-; SM80-NEXT:    mov.b32 %r3, -2147450880;
-; SM80-NEXT:    fma.rn.bf16x2 %r4, %r2, %r1, %r3;
+; SM80-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_0];
+; SM80-NEXT:    mov.b32 %r2, -2147450880;
+; SM80-NEXT:    ld.param.b32 %r3, [test_fmulx2_param_1];
+; SM80-NEXT:    fma.rn.bf16x2 %r4, %r1, %r3, %r2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r4;
 ; SM80-NEXT:    ret;
 ;
@@ -384,9 +384,9 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM90-NEXT:    .reg .b32 %r<4>;
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_1];
-; SM90-NEXT:    ld.param.b32 %r2, [test_fmulx2_param_0];
-; SM90-NEXT:    mul.rn.bf16x2 %r3, %r2, %r1;
+; SM90-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_0];
+; SM90-NEXT:    ld.param.b32 %r2, [test_fmulx2_param_1];
+; SM90-NEXT:    mul.rn.bf16x2 %r3, %r1, %r2;
 ; SM90-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM90-NEXT:    ret;
   %r = fmul <2 x bfloat> %a, %b
@@ -712,25 +712,25 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
 ; SM70-NEXT:  // %bb.0:
 ; SM70-NEXT:    ld.param.b64 %rd1, [test_extload_bf16x8_param_0];
 ; SM70-NEXT:    ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
-; SM70-NEXT:    mov.b32 {%rs5, %rs6}, %r3;
-; SM70-NEXT:    mov.b32 {%rs7, %rs8}, %r4;
-; SM70-NEXT:    cvt.u32.u16 %r5, %rs8;
+; SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
+; SM70-NEXT:    cvt.u32.u16 %r5, %rs2;
 ; SM70-NEXT:    shl.b32 %r6, %r5, 16;
-; SM70-NEXT:    cvt.u32.u16 %r7, %rs7;
+; SM70-NEXT:    cvt.u32.u16 %r7, %rs1;
 ; SM70-NEXT:    shl.b32 %r8, %r7, 16;
-; SM70-NEXT:    cvt.u32.u16 %r9, %rs6;
+; SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r3;
+; SM70-NEXT:    cvt.u32.u16 %r9, %rs4;
 ; SM70-NEXT:    shl.b32 %r10, %r9, 16;
-; SM70-NEXT:    cvt.u32.u16 %r11, %rs5;
+; SM70-NEXT:    cvt.u32.u16 %r11, %rs3;
 ; SM70-NEXT:    shl.b32 %r12, %r11, 16;
-; SM70-NEXT:    cvt.u32.u16 %r13, %rs4;
+; SM70-NEXT:    mov.b32 {%rs5, %rs6}, %r2;
+; SM70-NEXT:    cvt.u32.u16 %r13, %rs6;
 ; SM70-NEXT:    shl.b32 %r14, %r13, 16;
-; SM70-NEXT:    cvt.u32.u16 %r15, %rs3;
+; SM70-NEXT:    cvt.u32.u16 %r15, %rs5;
 ; SM70-NEXT:    shl.b32 %r16, %r15, 16;
-; SM70-NEXT:    cvt.u32.u16 %r17, %rs2;
+; SM70-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
+; SM70-NEXT:    cvt.u32.u16 %r17, %rs8;
 ; SM70-NEXT:    shl.b32 %r18, %r17, 16;
-; SM70-NEXT:    cvt.u32.u16 %r19, %rs1;
+; SM70-NEXT:    cvt.u32.u16 %r19, %rs7;
 ; SM70-NEXT:    shl.b32 %r20, %r19, 16;
 ; SM70-NEXT:    st.param.v4.b32 [func_retval0], {%r20, %r18, %r16, %r14};
 ; SM70-NEXT:    st.param.v4.b32 [func_retval0+16], {%r12, %r10, %r8, %r6};
@@ -745,18 +745,18 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.b64 %rd1, [test_extload_bf16x8_param_0];
 ; SM80-NEXT:    ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
-; SM80-NEXT:    mov.b32 {%rs5, %rs6}, %r3;
-; SM80-NEXT:    mov.b32 {%rs7, %rs8}, %r4;
-; SM80-NEXT:    cvt.f32.bf16 %r5, %rs8;
-; SM80-NEXT:    cvt.f32.bf16 %r6, %rs7;
-; SM80-NEXT:    cvt.f32.bf16 %r7, %rs6;
-; SM80-NEXT:    cvt.f32.bf16 %r8, %rs5;
-; SM80-NEXT:    cvt.f32.bf16 %r9, %rs4;
-; SM80-NEXT:    cvt.f32.bf16 %r10, %rs3;
-; SM80-NEXT:    cvt.f32.bf16 %r11, %rs2;
-; SM80-NEXT:    cvt.f32.bf16 %r12, %rs1;
+; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
+; SM80-NEXT:    cvt.f32.bf16 %r5, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %r6, %rs1;
+; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r3;
+; SM80-NEXT:    cvt.f32.bf16 %r7, %rs4;
+; SM80-NEXT:    cvt.f32.bf16 %r8, %rs3;
+; SM80-NEXT:    mov.b32 {%rs5, %rs6}, %r2;
+; SM80-NEXT:    cvt.f32.bf16 %r9, %rs6;
+; SM80-NEXT:    cvt.f32.bf16 %r10, %rs5;
+; SM80-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
+; SM80-NEXT:    cvt.f32.bf16 %r11, %rs8;
+; SM80-NEXT:    cvt.f32.bf16 %r12, %rs7;
 ; SM80-NEXT:    st.param.v4.b32 [func_retval0], {%r12, %r11, %r10, %r9};
 ; SM80-NEXT:    st.param.v4.b32 [func_retval0+16], {%r8, %r7, %r6, %r5};
 ; SM80-NEXT:    ret;
@@ -770,18 +770,18 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
 ; SM80-FTZ-NEXT:  // %bb.0:
 ; SM80-FTZ-NEXT:    ld.param.b64 %rd1, [test_extload_bf16x8_param_0];
 ; SM80-FTZ-NEXT:    ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; SM80-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; SM80-FTZ-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
-; SM80-FTZ-NEXT:    mov.b32 {%rs5, %rs6}, %r3;
-; SM80-FTZ-NEXT:    mov.b32 {%rs7, %rs8}, %r4;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r5, %rs8;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r6, %rs7;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r7, %rs6;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r8, %rs5;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r9, %rs4;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r10, %rs3;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r11, %rs2;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r12, %rs1;
+; SM80-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r5, %rs2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r6, %rs1;
+; SM80-FTZ-NEXT:    mov.b32 {%rs3, %rs4}, %r3;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r7, %rs4;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r8, %rs3;
+; SM80-FTZ-NEXT:    mov.b32 {%rs5, %rs6}, %r2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r9, %rs6;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r10, %rs5;
+; SM80-FTZ-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r11, %rs8;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %r12, %rs7;
 ; SM80-FTZ-NEXT:    st.param.v4.b32 [func_retval0], {%r12, %r11, %r10, %r9};
 ; SM80-FTZ-NEXT:    st.param.v4.b32 [func_retval0+16], {%r8, %r7, %r6, %r5};
 ; SM80-FTZ-NEXT:    ret;
@@ -795,18 +795,18 @@ define <8 x float> @test_extload_bf16x8(ptr addrspace(3) noundef %arg) #0 {
 ; SM90-NEXT:  // %bb.0:
 ; SM90-NEXT:    ld.param.b64 %rd1, [test_extload_bf16x8_param_0];
 ; SM90-NEXT:    ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; SM90-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; SM90-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
-; SM90-NEXT:    mov.b32 {%rs5, %rs6}, %r3;
-; SM90-NEXT:    mov.b32 {%rs7, %rs8}, %r4;
-; SM90-NEXT:    cvt.f32.bf16 %r5, %rs8;
-; SM90-NEXT:    cvt.f32.bf16 %r6, %rs7;
-; SM90-NEXT:    cvt.f32.bf16 %r7, %rs6;
-; SM90-NEXT:    cvt.f32.bf16 %r8, %rs5;
-; SM90-NEXT:    cvt.f32.bf16 %r9, %rs4;
-; SM90-NEXT:    cvt.f32.bf16 %r10, %rs3;
-; SM90-NEXT:    cvt.f32.bf16 %r11, %rs2;
-; SM90-NEXT:    cvt.f32.bf16 %r12, %rs1;
+; SM90-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
+; SM90-NEXT:    cvt.f32.bf16 %r5, %rs2;
+; SM90-NEXT:    cvt.f32.bf16 %r6, %rs1;
+; SM90-NEXT:    mov.b32 {%rs3, %rs4}, %r3;
+; SM90-NEXT:    cvt.f32.bf16 %r7, %rs4;
+; SM90-NEXT:    cvt.f32.bf16 %r8, %rs3;
+; SM90-NEXT:    mov.b32 {%rs5, %rs6}, %r2;
+; SM90-NEXT:    cvt.f32.bf16 %r9, %rs6;
+; SM90-NEXT:    cvt.f32.bf16 %r10, %rs5;
+; SM90-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
+; SM90-NEXT:    cvt.f32.bf16 %r11, %rs8;
+; SM90-NEXT:    cvt.f32.bf16 %r12, %rs7;
 ; SM90-NEXT:    st.param.v4.b32 [func_retval0], {%r12, %r11, %r10, %r9};
 ; SM90-NEXT:    st.param.v4.b32 [func_retval0+16], {%r8, %r7, %r6, %r5};
 ; SM90-NEXT:    ret;
@@ -1525,9 +1525,9 @@ define <2 x bfloat> @test_maximum_v2(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; SM80-NEXT:    .reg .b32 %r<4>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
-; SM80-NEXT:    ld.param.b32 %r1, [test_maximum_v2_param_1];
-; SM80-NEXT:    ld.param.b32 %r2, [test_maximum_v2_param_0];
-; SM80-NEXT:    max.NaN.bf16x2 %r3, %r2, %r1;
+; SM80-NEXT:    ld.param.b32 %r1, [test_maximum_v2_param_0];
+; SM80-NEXT:    ld.param.b32 %r2, [test_maximum_v2_param_1];
+; SM80-NEXT:    max.NaN.bf16x2 %r3, %r1, %r2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-NEXT:    ret;
 ;
@@ -1536,9 +1536,9 @@ define <2 x bfloat> @test_maximum_v2(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; SM80-FTZ-NEXT:    .reg .b32 %r<4>;
 ; SM80-FTZ-EMPTY:
 ; SM80-FTZ-NEXT:  // %bb.0:
-; SM80-FTZ-NEXT:    ld.param.b32 %r1, [test_maximum_v2_param_1];
-; SM80-FTZ-NEXT:    ld.param.b32 %r2, [test_maximum_v2_param_0];
-; SM80-FTZ-NEXT:    max.NaN.bf16x2 %r3, %r2, %r1;
+; SM80-FTZ-NEXT:    ld.param.b32 %r1, [test_maximum_v2_param_0];
+; SM80-FTZ-NEXT:    ld.param.b32 %r2, [test_maximum_v2_param_1];
+; SM80-FTZ-NEXT:    max.NaN.bf16x2 %r3, %r1, %r2;
 ; SM80-FTZ-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-FTZ-NEXT:    ret;
 ;
@@ -1547,9 +1547,9 @@ define <2 x bfloat> @test_maximum_v2(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; SM90-NEXT:    .reg .b32 %r<4>;
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b32 %r1, [test_maximum_v2_param_1];
-; SM90-NEXT:    ld.param.b32 %r2, [test_maximum_v2_param_0];
-; SM90-NEXT:    max.NaN.bf16x2 %r3, %r2, %r1;
+; SM90-NEXT:    ld.param.b32 %r1, [test_maximum_v2_param_0];
+; SM90-NEXT:    ld.param.b32 %r2, [test_maximum_v2_param_1];
+; SM90-NEXT:    max.NaN.bf16x2 %r3, %r1, %r2;
 ; SM90-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM90-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.maximum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
@@ -1599,9 +1599,9 @@ define <2 x bfloat> @test_maxnum_v2(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; SM80-NEXT:    .reg .b32 %r<4>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
-; SM80-NEXT:    ld.param.b32 %r1, [test_maxnum_v2_param_1];
-; SM80-NEXT:    ld.param.b32 %r2, [test_maxnum_v2_param_0];
-; SM80-NEXT:    max.bf16x2 %r3, %r2, %r1;
+; SM80-NEXT:    ld.param.b32 %r1, [test_maxnum_v2_param_0];
+; SM80-NEXT:    ld.param.b32 %r2, [test_maxnum_v2_param_1];
+; SM80-NEXT:    max.bf16x2 %r3, %r1, %r2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-NEXT:    ret;
 ;
@@ -1610,9 +1610,9 @@ define <2 x bfloat> @test_maxnum_v2(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; SM80-FTZ-NEXT:    .reg .b32 %r<4>;
 ; SM80-FTZ-EMPTY:
 ; SM80-FTZ-NEXT:  // %bb.0:
-; SM80-FTZ-NEXT:    ld.param.b32 %r1, [test_maxnum_v2_param_1];
-; SM80-FTZ-NEXT:    ld.param.b32 %r2, [test_maxnum_v2_param_0];
-; SM80-FTZ-NEXT:    max.bf16x2 %r3, %r2, %r1;
+; SM80-FTZ-NEXT:    ld.param.b32 %r1, [test_maxnum_v2_param_0];
+; SM80-FTZ-NEXT:    ld.param.b32 %r2, [test_maxnum_v2_param_1];
+; SM80-FTZ-NEXT:    max.bf16x2 %r3, %r1, %r2;
 ; SM80-FTZ-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-FTZ-NEXT:    ret;
 ;
@@ -1621,9 +1621,9 @@ define <2 x bfloat> @test_maxnum_v2(<2 x bfloat> %a, <2 x bfloat> %b) {
 ; SM90-NEXT:    .reg .b32 %r<4>;
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b32 %r1, [test_maxnum_v2_param_1];
-; SM90-NEXT:    ld.param.b32 %r2, [test_maxnum_v2_param_0];
-; SM90-NEXT:    max.bf16x2 %r3, %r2, %r1;
+; SM90-NEXT:    ld.param.b32 %r1, [test_maxnum_v2_param_0];
+; SM90-NEXT:    ld.param.b32 %r2, [test_maxnum_v2_param_1];
+; SM90-NEXT:    max.bf16x2 %r3, %r1, %r2;
 ; SM90-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM90-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.maxnum.v2bf16(<2 x bfloat> %a, <2 x bfloat> %b)
diff --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
index ec993aa15a85a..a250190a7b1ab 100644
--- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
@@ -79,9 +79,9 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.b32 %r1, [test_fsubx2_param_0];
-; SM80-NEXT:    ld.param.b32 %r2, [test_fsubx2_param_1];
-; SM80-NEXT:    mov.b32 %r3, -1082081408;
-; SM80-NEXT:    fma.rn.bf16x2 %r4, %r2, %r3, %r1;
+; SM80-NEXT:    mov.b32 %r2, -1082081408;
+; SM80-NEXT:    ld.param.b32 %r3, [test_fsubx2_param_1];
+; SM80-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r4;
 ; SM80-NEXT:    ret;
 ;
@@ -90,9 +90,9 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM90-NEXT:    .reg .b32 %r<4>;
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b32 %r1, [test_fsubx2_param_1];
-; SM90-NEXT:    ld.param.b32 %r2, [test_fsubx2_param_0];
-; SM90-NEXT:    sub.rn.bf16x2 %r3, %r2, %r1;
+; SM90-NEXT:    ld.param.b32 %r1, [test_fsubx2_param_0];
+; SM90-NEXT:    ld.param.b32 %r2, [test_fsubx2_param_1];
+; SM90-NEXT:    sub.rn.bf16x2 %r3, %r1, %r2;
 ; SM90-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM90-NEXT:    ret;
   %r = fsub <2 x bfloat> %a, %b
@@ -105,10 +105,10 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    .reg .b32 %r<5>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
-; SM80-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_1];
-; SM80-NEXT:    ld.param.b32 %r2, [test_fmulx2_param_0];
-; SM80-NEXT:    mov.b32 %r3, -2147450880;
-; SM80-NEXT:    fma.rn.bf16x2 %r4, %r2, %r1, %r3;
+; SM80-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_0];
+; SM80-NEXT:    mov.b32 %r2, -2147450880;
+; SM80-NEXT:    ld.param.b32 %r3, [test_fmulx2_param_1];
+; SM80-NEXT:    fma.rn.bf16x2 %r4, %r1, %r3, %r2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r4;
 ; SM80-NEXT:    ret;
 ;
@@ -117,9 +117,9 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM90-NEXT:    .reg .b32 %r<4>;
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_1];
-; SM90-NEXT:    ld.param.b32 %r2, [test_fmulx2_param_0];
-; SM90-NEXT:    mul.rn.bf16x2 %r3, %r2, %r1;
+; SM90-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_0];
+; SM90-NEXT:    ld.param.b32 %r2, [test_fmulx2_param_1];
+; SM90-NEXT:    mul.rn.bf16x2 %r3, %r1, %r2;
 ; SM90-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM90-NEXT:    ret;
   %r = fmul <2 x bfloat> %a, %b
@@ -157,7 +157,7 @@ define <2 x bfloat> @test_fneg(<2 x bfloat> %a) #0 {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_fneg_param_0];
-; CHECK-NEXT:    xor.b32 %r2, %r1, -2147450880;
+; CHECK-NEXT:    neg.bf16x2 %r2, %r1;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r2;
 ; CHECK-NEXT:    ret;
   %r = fneg <2 x bfloat> %a
@@ -243,9 +243,9 @@ define <2 x bfloat> @test_select(<2 x bfloat> %a, <2 x bfloat> %b, i1 zeroext %c
 ; CHECK-NEXT:    ld.param.b8 %rs1, [test_select_param_2];
 ; CHECK-NEXT:    and.b16 %rs2, %rs1, 1;
 ; CHECK-NEXT:    setp.ne.b16 %p1, %rs2, 0;
-; CHECK-NEXT:    ld.param.b32 %r1, [test_select_param_1];
-; CHECK-NEXT:    ld.param.b32 %r2, [test_select_param_0];
-; CHECK-NEXT:    selp.b32 %r3, %r2, %r1, %p1;
+; CHECK-NEXT:    ld.param.b32 %r1, [test_select_param_0];
+; CHECK-NEXT:    ld.param.b32 %r2, [test_select_param_1];
+; CHECK-NEXT:    selp.b32 %r3, %r1, %r2, %p1;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r3;
 ; CHECK-NEXT:    ret;
   %r = select i1 %c, <2 x bfloat> %a, <2 x bfloat> %b
@@ -261,18 +261,18 @@ define <2 x bfloat> @test_select_cc(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloa
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.b32 %r1, [test_select_cc_param_0];
-; SM80-NEXT:    ld.param.b32 %r2, [test_select_cc_param_1];
-; SM80-NEXT:    ld.param.b32 %r3, [test_select_cc_param_2];
-; SM80-NEXT:    ld.param.b32 %r4, [test_select_cc_param_3];
-; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; SM80-NEXT:    cvt.f32.bf16 %r5, %rs1;
-; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r3;
-; SM80-NEXT:    cvt.f32.bf16 %r6, %rs3;
-; SM80-NEXT:    setp.neu.f32 %p1, %r6, %r5;
-; SM80-NEXT:    cvt.f32.bf16 %r7, %rs2;
-; SM80-NEXT:    cvt.f32.bf16 %r8, %rs4;
-; SM80-NEXT:    setp.neu.f32 %p2, %r8, %r7;
-; SM80-NEXT:    mov.b32 {%rs5, %rs6}, %r2;
+; SM80-NEXT:    ld.param.b32 %r2, [test_select_cc_param_3];
+; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
+; SM80-NEXT:    cvt.f32.bf16 %r3, %rs1;
+; SM80-NEXT:    ld.param.b32 %r4, [test_select_cc_param_2];
+; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r4;
+; SM80-NEXT:    cvt.f32.bf16 %r5, %rs3;
+; SM80-NEXT:    setp.neu.f32 %p1, %r5, %r3;
+; SM80-NEXT:    cvt.f32.bf16 %r6, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %r7, %rs4;
+; SM80-NEXT:    setp.neu.f32 %p2, %r7, %r6;
+; SM80-NEXT:    ld.param.b32 %r8, [test_select_cc_param_1];
+; SM80-NEXT:    mov.b32 {%rs5, %rs6}, %r8;
 ; SM80-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
 ; SM80-NEXT:    selp.b16 %rs9, %rs8, %rs6, %p2;
 ; SM80-NEXT:    selp.b16 %rs10, %rs7, %rs5, %p1;
@@ -288,11 +288,11 @@ define <2 x bfloat> @test_select_cc(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloa
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
 ; SM90-NEXT:    ld.param.b32 %r1, [test_select_cc_param_0];
-; SM90-NEXT:    ld.param.b32 %r2, [test_select_cc_param_1];
+; SM90-NEXT:    ld.param.b32 %r2, [test_select_cc_param_2];
 ; SM90-NEXT:    ld.param.b32 %r3, [test_select_cc_param_3];
-; SM90-NEXT:    ld.param.b32 %r4, [test_select_cc_param_2];
-; SM90-NEXT:    setp.neu.bf16x2 %p1|%p2, %r4, %r3;
-; SM90-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
+; SM90-NEXT:    setp.neu.bf16x2 %p1|%p2, %r2, %r3;
+; SM90-NEXT:    ld.param.b32 %r4, [test_select_cc_param_1];
+; SM90-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
 ; SM90-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
 ; SM90-NEXT:    selp.b16 %rs5, %rs4, %rs2, %p2;
 ; SM90-NEXT:    selp.b16 %rs6, %rs3, %rs1, %p1;
@@ -313,13 +313,13 @@ define <2 x float> @test_select_cc_f32_bf16(<2 x float> %a, <2 x float> %b,
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_select_cc_f32_bf16_param_0];
-; SM80-NEXT:    ld.param.b32 %r3, [test_select_cc_f32_bf16_param_2];
-; SM80-NEXT:    ld.param.b32 %r4, [test_select_cc_f32_bf16_param_3];
-; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; SM80-NEXT:    cvt.f32.bf16 %r5, %rs1;
-; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r3;
+; SM80-NEXT:    ld.param.b32 %r3, [test_select_cc_f32_bf16_param_3];
+; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r3;
+; SM80-NEXT:    cvt.f32.bf16 %r4, %rs1;
+; SM80-NEXT:    ld.param.b32 %r5, [test_select_cc_f32_bf16_param_2];
+; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r5;
 ; SM80-NEXT:    cvt.f32.bf16 %r6, %rs3;
-; SM80-NEXT:    setp.neu.f32 %p1, %r6, %r5;
+; SM80-NEXT:    setp.neu.f32 %p1, %r6, %r4;
 ; SM80-NEXT:    cvt.f32.bf16 %r7, %rs2;
 ; SM80-NEXT:    cvt.f32.bf16 %r8, %rs4;
 ; SM80-NEXT:    setp.neu.f32 %p2, %r8, %r7;
@@ -336,9 +336,9 @@ define <2 x float> @test_select_cc_f32_bf16(<2 x float> %a, <2 x float> %b,
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
 ; SM90-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_select_cc_f32_bf16_param_0];
-; SM90-NEXT:    ld.param.b32 %r3, [test_select_cc_f32_bf16_param_3];
-; SM90-NEXT:    ld.param.b32 %r4, [test_select_cc_f32_bf16_param_2];
-; SM90-NEXT:    setp.neu.bf16x2 %p1|%p2, %r4, %r3;
+; SM90-NEXT:    ld.param.b32 %r3, [test_select_cc_f32_bf16_param_2];
+; SM90-NEXT:    ld.param.b32 %r4, [test_select_cc_f32_bf16_param_3];
+; SM90-NEXT:    setp.neu.bf16x2 %p1|%p2, %r3, %r4;
 ; SM90-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_select_cc_f32_bf16_param_1];
 ; SM90-NEXT:    selp.f32 %r7, %r2, %r6, %p2;
 ; SM90-NEXT:    selp.f32 %r8, %r1, %r5, %p1;
@@ -359,12 +359,12 @@ define <2 x bfloat> @test_select_cc_bf16_f32(<2 x bfloat> %a, <2 x bfloat> %b,
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_select_cc_bf16_f32_param_0];
-; CHECK-NEXT:    ld.param.b32 %r2, [test_select_cc_bf16_f32_param_1];
-; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_bf16_f32_param_2];
-; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_select_cc_bf16_f32_param_3];
-; CHECK-NEXT:    setp.neu.f32 %p1, %r3, %r5;
-; CHECK-NEXT:    setp.neu.f32 %p2, %r4, %r6;
-; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-NEXT:    ld.param.v2.b32 {%r2, %r3}, [test_select_cc_bf16_f32_param_2];
+; CHECK-NEXT:    ld.param.v2.b32 {%r4, %r5}, [test_select_cc_bf16_f32_param_3];
+; CHECK-NEXT:    setp.neu.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.neu.f32 %p2, %r3, %r5;
+; CHECK-NEXT:    ld.param.b32 %r6, [test_select_cc_bf16_f32_param_1];
+; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r6;
 ; CHECK-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
 ; CHECK-NEXT:    selp.b16 %rs5, %rs4, %rs2, %p2;
 ; CHECK-NEXT:    selp.b16 %rs6, %rs3, %rs1, %p1;
@@ -483,10 +483,10 @@ define <2 x bfloat> @test_fmuladd(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat>
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [test_fmuladd_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [test_fmuladd_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [test_fmuladd_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [test_fmuladd_param_0];
-; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [test_fmuladd_param_2];
+; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.fmuladd.f16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)
@@ -500,7 +500,7 @@ define <2 x bfloat> @test_fabs(<2 x bfloat> %a) #0 {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_fabs_param_0];
-; CHECK-NEXT:    and.b32 %r2, %r1, 2147450879;
+; CHECK-NEXT:    abs.bf16x2 %r2, %r1;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r2;
 ; CHECK-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.fabs.f16(<2 x bfloat> %a)
@@ -513,12 +513,12 @@ define <2 x bfloat> @test_fabs_add(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    .reg .b32 %r<7>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
-; SM80-NEXT:    ld.param.b32 %r1, [test_fabs_add_param_1];
-; SM80-NEXT:    ld.param.b32 %r2, [test_fabs_add_param_0];
-; SM80-NEXT:    mov.b32 %r3, 1065369472;
-; SM80-NEXT:    fma.rn.bf16x2 %r4, %r2, %r3, %r2;
-; SM80-NEXT:    abs.bf16x2 %r5, %r4;
-; SM80-NEXT:    fma.rn.bf16x2 %r6, %r5, %r3, %r1;
+; SM80-NEXT:    ld.param.b32 %r1, [test_fabs_add_param_0];
+; SM80-NEXT:    mov.b32 %r2, 1065369472;
+; SM80-NEXT:    fma.rn.bf16x2 %r3, %r1, %r2, %r1;
+; SM80-NEXT:    ld.param.b32 %r4, [test_fabs_add_param_1];
+; SM80-NEXT:    abs.bf16x2 %r5, %r3;
+; SM80-NEXT:    fma.rn.bf16x2 %r6, %r5, %r2, %r4;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r6;
 ; SM80-NEXT:    ret;
 ;
@@ -527,11 +527,11 @@ define <2 x bfloat> @test_fabs_add(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM90-NEXT:    .reg .b32 %r<6>;
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b32 %r1, [test_fabs_add_param_1];
-; SM90-NEXT:    ld.param.b32 %r2, [test_fabs_add_param_0];
-; SM90-NEXT:    add.rn.bf16x2 %r3, %r2, %r2;
-; SM90-NEXT:    abs.bf16x2 %r4, %r3;
-; SM90-NEXT:    add.rn.bf16x2 %r5, %r4, %r1;
+; SM90-NEXT:    ld.param.b32 %r1, [test_fabs_add_param_0];
+; SM90-NEXT:    add.rn.bf16x2 %r2, %r1, %r1;
+; SM90-NEXT:    ld.param.b32 %r3, [test_fabs_add_param_1];
+; SM90-NEXT:    abs.bf16x2 %r4, %r2;
+; SM90-NEXT:    add.rn.bf16x2 %r5, %r4, %r3;
 ; SM90-NEXT:    st.param.b32 [func_retval0], %r5;
 ; SM90-NEXT:    ret;
   %s = fadd <2 x bfloat> %a, %a
@@ -546,9 +546,9 @@ define <2 x bfloat> @test_minnum(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; CHECK-NEXT:    .reg .b32 %r<4>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [test_minnum_param_1];
-; CHECK-NEXT:    ld.param.b32 %r2, [test_minnum_param_0];
-; CHECK-NEXT:    min.bf16x2 %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r1, [test_minnum_param_0];
+; CHECK-NEXT:    ld.param.b32 %r2, [test_minnum_param_1];
+; CHECK-NEXT:    min.bf16x2 %r3, %r1, %r2;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r3;
 ; CHECK-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.minnum.f16(<2 x bfloat> %a, <2 x bfloat> %b)
@@ -561,9 +561,9 @@ define <2 x bfloat> @test_maxnum(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; CHECK-NEXT:    .reg .b32 %r<4>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [test_maxnum_param_1];
-; CHECK-NEXT:    ld.param.b32 %r2, [test_maxnum_param_0];
-; CHECK-NEXT:    max.bf16x2 %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r1, [test_maxnum_param_0];
+; CHECK-NEXT:    ld.param.b32 %r2, [test_maxnum_param_1];
+; CHECK-NEXT:    max.bf16x2 %r3, %r1, %r2;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r3;
 ; CHECK-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.maxnum.f16(<2 x bfloat> %a, <2 x bfloat> %b)
diff --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
index 8c89f82dbf9c1..511477c77decc 100644
--- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -614,15 +614,18 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
 ; CHECK-F16:       {
 ; CHECK-F16-NEXT:    .reg .pred %p<3>;
 ; CHECK-F16-NEXT:    .reg .b32 %r<9>;
+; CHECK-F16-NEXT:    .reg .b64 %rd<3>;
 ; CHECK-F16-EMPTY:
 ; CHECK-F16-NEXT:  // %bb.0:
-; CHECK-F16-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f32_f16_param_1];
-; CHECK-F16-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_select_cc_f32_f16_param_0];
-; CHECK-F16-NEXT:    ld.param.b32 %r6, [test_select_cc_f32_f16_param_3];
-; CHECK-F16-NEXT:    ld.param.b32 %r5, [test_select_cc_f32_f16_param_2];
-; CHECK-F16-NEXT:    setp.neu.f16x2 %p1|%p2, %r5, %r6;
-; CHECK-F16-NEXT:    selp.f32 %r7, %r2, %r4, %p2;
-; CHECK-F16-NEXT:    selp.f32 %r8, %r1, %r3, %p1;
+; CHECK-F16-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f32_f16_param_0];
+; CHECK-F16-NEXT:    mov.b64 %rd1, {%r3, %r4};
+; CHECK-F16-NEXT:    ld.param.b32 %r2, [test_select_cc_f32_f16_param_3];
+; CHECK-F16-NEXT:    ld.param.b32 %r1, [test_select_cc_f32_f16_param_2];
+; CHECK-F16-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_select_cc_f32_f16_param_1];
+; CHECK-F16-NEXT:    mov.b64 %rd2, {%r5, %r6};
+; CHECK-F16-NEXT:    setp.neu.f16x2 %p1|%p2, %r1, %r2;
+; CHECK-F16-NEXT:    selp.f32 %r7, %r4, %r6, %p2;
+; CHECK-F16-NEXT:    selp.f32 %r8, %r3, %r5, %p1;
 ; CHECK-F16-NEXT:    st.param.v2.b32 [func_retval0], {%r8, %r7};
 ; CHECK-F16-NEXT:    ret;
 ;
@@ -631,22 +634,25 @@ define <2 x float> @test_select_cc_f32_f16(<2 x float> %a, <2 x float> %b,
 ; CHECK-NOF16-NEXT:    .reg .pred %p<3>;
 ; CHECK-NOF16-NEXT:    .reg .b16 %rs<5>;
 ; CHECK-NOF16-NEXT:    .reg .b32 %r<13>;
+; CHECK-NOF16-NEXT:    .reg .b64 %rd<3>;
 ; CHECK-NOF16-EMPTY:
 ; CHECK-NOF16-NEXT:  // %bb.0:
-; CHECK-NOF16-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f32_f16_param_1];
-; CHECK-NOF16-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_select_cc_f32_f16_param_0];
-; CHECK-NOF16-NEXT:    ld.param.b32 %r6, [test_select_cc_f32_f16_param_3];
-; CHECK-NOF16-NEXT:    ld.param.b32 %r5, [test_select_cc_f32_f16_param_2];
-; CHECK-NOF16-NEXT:    mov.b32 {%rs1, %rs2}, %r6;
+; CHECK-NOF16-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f32_f16_param_0];
+; CHECK-NOF16-NEXT:    mov.b64 %rd1, {%r3, %r4};
+; CHECK-NOF16-NEXT:    ld.param.b32 %r2, [test_select_cc_f32_f16_param_3];
+; CHECK-NOF16-NEXT:    ld.param.b32 %r1, [test_select_cc_f32_f16_param_2];
+; CHECK-NOF16-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_select_cc_f32_f16_param_1];
+; CHECK-NOF16-NEXT:    mov.b64 %rd2, {%r5, %r6};
+; CHECK-NOF16-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
 ; CHECK-NOF16-NEXT:    cvt.f32.f16 %r7, %rs1;
-; CHECK-NOF16-NEXT:    mov.b32 {%rs3, %rs4}, %r5;
+; CHECK-NOF16-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
 ; CHECK-NOF16-NEXT:    cvt.f32.f16 %r8, %rs3;
 ; CHECK-NOF16-NEXT:    setp.neu.f32 %p1, %r8, %r7;
 ; CHECK-NOF16-NEXT:    cvt.f32.f16 %r9, %rs2;
 ; CHECK-NOF16-NEXT:    cvt.f32.f16 %r10, %rs4;
 ; CHECK-NOF16-NEXT:    setp.neu.f32 %p2, %r10, %r9;
-; CHECK-NOF16-NEXT:    selp.f32 %r11, %r2, %r4, %p2;
-; CHECK-NOF16-NEXT:    selp.f32 %r12, %r1, %r3, %p1;
+; CHECK-NOF16-NEXT:    selp.f32 %r11, %r4, %r6, %p2;
+; CHECK-NOF16-NEXT:    selp.f32 %r12, %r3, %r5, %p1;
 ; CHECK-NOF16-NEXT:    st.param.v2.b32 [func_retval0], {%r12, %r11};
 ; CHECK-NOF16-NEXT:    ret;
                                            <2 x half> %c, <2 x half> %d) #0 {
@@ -661,14 +667,17 @@ define <2 x half> @test_select_cc_f16_f32(<2 x half> %a, <2 x half> %b,
 ; CHECK-NEXT:    .reg .pred %p<3>;
 ; CHECK-NEXT:    .reg .b16 %rs<7>;
 ; CHECK-NEXT:    .reg .b32 %r<8>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_select_cc_f16_f32_param_3];
-; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f16_f32_param_2];
 ; CHECK-NEXT:    ld.param.b32 %r2, [test_select_cc_f16_f32_param_1];
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_select_cc_f16_f32_param_0];
-; CHECK-NEXT:    setp.neu.f32 %p1, %r3, %r5;
-; CHECK-NEXT:    setp.neu.f32 %p2, %r4, %r6;
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f16_f32_param_3];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_select_cc_f16_f32_param_2];
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    setp.neu.f32 %p1, %r5, %r3;
+; CHECK-NEXT:    setp.neu.f32 %p2, %r6, %r4;
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
 ; CHECK-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
 ; CHECK-NEXT:    selp.b16 %rs5, %rs4, %rs2, %p2;
@@ -1517,9 +1526,11 @@ define <2 x half> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b16 %rs<3>;
 ; CHECK-NEXT:    .reg .b32 %r<4>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fptrunc_2xfloat_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
 ; CHECK-NEXT:    cvt.rn.f16.f32 %rs1, %r2;
 ; CHECK-NEXT:    cvt.rn.f16.f32 %rs2, %r1;
 ; CHECK-NEXT:    mov.b32 %r3, {%rs2, %rs1};
@@ -1943,10 +1954,12 @@ define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
 ; CHECK-F16:       {
 ; CHECK-F16-NEXT:    .reg .b16 %rs<3>;
 ; CHECK-F16-NEXT:    .reg .b32 %r<8>;
+; CHECK-F16-NEXT:    .reg .b64 %rd<2>;
 ; CHECK-F16-EMPTY:
 ; CHECK-F16-NEXT:  // %bb.0:
-; CHECK-F16-NEXT:    ld.param.v2.b32 {%r2, %r3}, [test_copysign_f32_param_1];
 ; CHECK-F16-NEXT:    ld.param.b32 %r1, [test_copysign_f32_param_0];
+; CHECK-F16-NEXT:    ld.param.v2.b32 {%r2, %r3}, [test_copysign_f32_param_1];
+; CHECK-F16-NEXT:    mov.b64 %rd1, {%r2, %r3};
 ; CHECK-F16-NEXT:    cvt.rn.f16.f32 %rs1, %r3;
 ; CHECK-F16-NEXT:    cvt.rn.f16.f32 %rs2, %r2;
 ; CHECK-F16-NEXT:    mov.b32 %r4, {%rs2, %rs1};
@@ -1960,10 +1973,12 @@ define <2 x half> @test_copysign_f32(<2 x half> %a, <2 x float> %b) #0 {
 ; CHECK-NOF16:       {
 ; CHECK-NOF16-NEXT:    .reg .b16 %rs<9>;
 ; CHECK-NOF16-NEXT:    .reg .b32 %r<7>;
+; CHECK-NOF16-NEXT:    .reg .b64 %rd<2>;
 ; CHECK-NOF16-EMPTY:
 ; CHECK-NOF16-NEXT:  // %bb.0:
-; CHECK-NOF16-NEXT:    ld.param.v2.b32 {%r2, %r3}, [test_copysign_f32_param_1];
 ; CHECK-NOF16-NEXT:    ld.param.b32 %r1, [test_copysign_f32_param_0];
+; CHECK-NOF16-NEXT:    ld.param.v2.b32 {%r2, %r3}, [test_copysign_f32_param_1];
+; CHECK-NOF16-NEXT:    mov.b64 %rd1, {%r2, %r3};
 ; CHECK-NOF16-NEXT:    and.b32 %r4, %r3, -2147483648;
 ; CHECK-NOF16-NEXT:    { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r4; }
 ; CHECK-NOF16-NEXT:    mov.b32 {%rs2, %rs3}, %r1;
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
new file mode 100644
index 0000000000000..4ed84ff4dce5f
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -0,0 +1,1892 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; ## Full FP32x2 support enabled by default.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100                      \
+; RUN:         -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs   \
+; RUN: | FileCheck --check-prefixes=CHECK %s
+; RUN: %if ptxas %{                                                            \
+; RUN:  llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100                     \
+; RUN:           -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN:  | %ptxas-verify -arch=sm_100                                           \
+; RUN: %}
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "nvptx64-nvidia-cuda"
+
+define <2 x float> @test_ret_const() #0 {
+; CHECK-LABEL: test_ret_const(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    mov.b32 %r1, 0f40000000;
+; CHECK-NEXT:    mov.b32 %r2, 0f3F800000;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT:    ret;
+  ret <2 x float> <float 1.0, float 2.0>
+}
+
+define float @test_extract_0(<2 x float> %a) #0 {
+; CHECK-LABEL: test_extract_0(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_extract_0_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %e = extractelement <2 x float> %a, i32 0
+  ret float %e
+}
+
+define float @test_extract_1(<2 x float> %a) #0 {
+; CHECK-LABEL: test_extract_1(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_extract_1_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r2;
+; CHECK-NEXT:    ret;
+  %e = extractelement <2 x float> %a, i32 1
+  ret float %e
+}
+
+; NOTE: disabled as -O3 miscompiles this into pointer arithmetic on
+; test_extract_i_param_0 where the symbol's address is not taken first (that
+; is, moved to a temporary)
+; define float @test_extract_i(<2 x float> %a, i64 %idx) #0 {
+;   %e = extractelement <2 x float> %a, i64 %idx
+;   ret float %e
+; }
+
+define <2 x float> @test_fadd(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fadd(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fadd_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fadd_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fadd <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_imm_0(<2 x float> %a) #0 {
+; CHECK-LABEL: test_fadd_imm_0(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fadd_imm_0_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    mov.b32 %r3, 0f40000000;
+; CHECK-NEXT:    mov.b32 %r4, 0f3F800000;
+; CHECK-NEXT:    mov.b64 %rd2, {%r4, %r3};
+; CHECK-NEXT:    add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fadd <2 x float> <float 1.0, float 2.0>, %a
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_imm_1(<2 x float> %a) #0 {
+; CHECK-LABEL: test_fadd_imm_1(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fadd_imm_1_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    mov.b32 %r3, 0f40000000;
+; CHECK-NEXT:    mov.b32 %r4, 0f3F800000;
+; CHECK-NEXT:    mov.b64 %rd2, {%r4, %r3};
+; CHECK-NEXT:    add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fadd <2 x float> %a, <float 1.0, float 2.0>
+  ret <2 x float> %r
+}
+
+define <4 x float> @test_fadd_v4(<4 x float> %a, <4 x float> %b) #0 {
+; CHECK-LABEL: test_fadd_v4(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<13>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [test_fadd_v4_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [test_fadd_v4_param_1];
+; CHECK-NEXT:    mov.b64 %rd4, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd3, {%r5, %r6};
+; CHECK-NEXT:    add.rn.f32x2 %rd5, %rd1, %rd3;
+; CHECK-NEXT:    add.rn.f32x2 %rd6, %rd2, %rd4;
+; CHECK-NEXT:    mov.b64 {%r9, %r10}, %rd6;
+; CHECK-NEXT:    mov.b64 {%r11, %r12}, %rd5;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0], {%r11, %r12, %r9, %r10};
+; CHECK-NEXT:    ret;
+  %r = fadd <4 x float> %a, %b
+  ret <4 x float> %r
+}
+
+define <4 x float> @test_fadd_imm_0_v4(<4 x float> %a) #0 {
+; CHECK-LABEL: test_fadd_imm_0_v4(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<13>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [test_fadd_imm_0_v4_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    mov.b32 %r5, 0f40000000;
+; CHECK-NEXT:    mov.b32 %r6, 0f3F800000;
+; CHECK-NEXT:    mov.b64 %rd3, {%r6, %r5};
+; CHECK-NEXT:    add.rn.f32x2 %rd4, %rd1, %rd3;
+; CHECK-NEXT:    mov.b32 %r7, 0f40800000;
+; CHECK-NEXT:    mov.b32 %r8, 0f40400000;
+; CHECK-NEXT:    mov.b64 %rd5, {%r8, %r7};
+; CHECK-NEXT:    add.rn.f32x2 %rd6, %rd2, %rd5;
+; CHECK-NEXT:    mov.b64 {%r9, %r10}, %rd6;
+; CHECK-NEXT:    mov.b64 {%r11, %r12}, %rd4;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0], {%r11, %r12, %r9, %r10};
+; CHECK-NEXT:    ret;
+  %r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
+  ret <4 x float> %r
+}
+
+define <4 x float> @test_fadd_imm_1_v4(<4 x float> %a) #0 {
+; CHECK-LABEL: test_fadd_imm_1_v4(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<13>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [test_fadd_imm_1_v4_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    mov.b32 %r5, 0f40000000;
+; CHECK-NEXT:    mov.b32 %r6, 0f3F800000;
+; CHECK-NEXT:    mov.b64 %rd3, {%r6, %r5};
+; CHECK-NEXT:    add.rn.f32x2 %rd4, %rd1, %rd3;
+; CHECK-NEXT:    mov.b32 %r7, 0f40800000;
+; CHECK-NEXT:    mov.b32 %r8, 0f40400000;
+; CHECK-NEXT:    mov.b64 %rd5, {%r8, %r7};
+; CHECK-NEXT:    add.rn.f32x2 %rd6, %rd2, %rd5;
+; CHECK-NEXT:    mov.b64 {%r9, %r10}, %rd6;
+; CHECK-NEXT:    mov.b64 {%r11, %r12}, %rd4;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0], {%r11, %r12, %r9, %r10};
+; CHECK-NEXT:    ret;
+  %r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
+  ret <4 x float> %r
+}
+
+define <2 x float> @test_fsub(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fsub(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fsub_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fsub_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    sub.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fsub <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fneg(<2 x float> %a) #0 {
+; CHECK-LABEL: test_fneg(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fneg_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    neg.f32 %r3, %r2;
+; CHECK-NEXT:    neg.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = fneg <2 x float> %a
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fmul(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fmul(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fmul_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fmul_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    mul.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fmul <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fdiv(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fdiv(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fdiv_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fdiv_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    div.rn.f32 %r5, %r2, %r4;
+; CHECK-NEXT:    div.rn.f32 %r6, %r1, %r3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NEXT:    ret;
+  %r = fdiv <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_frem(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_frem(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b32 %r<15>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_frem_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_frem_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    div.rn.f32 %r5, %r2, %r4;
+; CHECK-NEXT:    cvt.rzi.f32.f32 %r6, %r5;
+; CHECK-NEXT:    neg.f32 %r7, %r6;
+; CHECK-NEXT:    fma.rn.f32 %r8, %r7, %r4, %r2;
+; CHECK-NEXT:    testp.infinite.f32 %p1, %r4;
+; CHECK-NEXT:    selp.f32 %r9, %r2, %r8, %p1;
+; CHECK-NEXT:    div.rn.f32 %r10, %r1, %r3;
+; CHECK-NEXT:    cvt.rzi.f32.f32 %r11, %r10;
+; CHECK-NEXT:    neg.f32 %r12, %r11;
+; CHECK-NEXT:    fma.rn.f32 %r13, %r12, %r3, %r1;
+; CHECK-NEXT:    testp.infinite.f32 %p2, %r3;
+; CHECK-NEXT:    selp.f32 %r14, %r1, %r13, %p2;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r14, %r9};
+; CHECK-NEXT:    ret;
+  %r = frem <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-LABEL: test_fadd_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fadd_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fadd_ftz_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fadd <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_imm_0_ftz(<2 x float> %a) #2 {
+; CHECK-LABEL: test_fadd_imm_0_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fadd_imm_0_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    mov.b32 %r3, 0f40000000;
+; CHECK-NEXT:    mov.b32 %r4, 0f3F800000;
+; CHECK-NEXT:    mov.b64 %rd2, {%r4, %r3};
+; CHECK-NEXT:    add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fadd <2 x float> <float 1.0, float 2.0>, %a
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fadd_imm_1_ftz(<2 x float> %a) #2 {
+; CHECK-LABEL: test_fadd_imm_1_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fadd_imm_1_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    mov.b32 %r3, 0f40000000;
+; CHECK-NEXT:    mov.b32 %r4, 0f3F800000;
+; CHECK-NEXT:    mov.b64 %rd2, {%r4, %r3};
+; CHECK-NEXT:    add.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fadd <2 x float> %a, <float 1.0, float 2.0>
+  ret <2 x float> %r
+}
+
+define <4 x float> @test_fadd_v4_ftz(<4 x float> %a, <4 x float> %b) #2 {
+; CHECK-LABEL: test_fadd_v4_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<13>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [test_fadd_v4_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [test_fadd_v4_ftz_param_1];
+; CHECK-NEXT:    mov.b64 %rd4, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd3, {%r5, %r6};
+; CHECK-NEXT:    add.rn.ftz.f32x2 %rd5, %rd1, %rd3;
+; CHECK-NEXT:    add.rn.ftz.f32x2 %rd6, %rd2, %rd4;
+; CHECK-NEXT:    mov.b64 {%r9, %r10}, %rd6;
+; CHECK-NEXT:    mov.b64 {%r11, %r12}, %rd5;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0], {%r11, %r12, %r9, %r10};
+; CHECK-NEXT:    ret;
+  %r = fadd <4 x float> %a, %b
+  ret <4 x float> %r
+}
+
+define <4 x float> @test_fadd_imm_0_v4_ftz(<4 x float> %a) #2 {
+; CHECK-LABEL: test_fadd_imm_0_v4_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<13>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [test_fadd_imm_0_v4_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    mov.b32 %r5, 0f40000000;
+; CHECK-NEXT:    mov.b32 %r6, 0f3F800000;
+; CHECK-NEXT:    mov.b64 %rd3, {%r6, %r5};
+; CHECK-NEXT:    add.rn.ftz.f32x2 %rd4, %rd1, %rd3;
+; CHECK-NEXT:    mov.b32 %r7, 0f40800000;
+; CHECK-NEXT:    mov.b32 %r8, 0f40400000;
+; CHECK-NEXT:    mov.b64 %rd5, {%r8, %r7};
+; CHECK-NEXT:    add.rn.ftz.f32x2 %rd6, %rd2, %rd5;
+; CHECK-NEXT:    mov.b64 {%r9, %r10}, %rd6;
+; CHECK-NEXT:    mov.b64 {%r11, %r12}, %rd4;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0], {%r11, %r12, %r9, %r10};
+; CHECK-NEXT:    ret;
+  %r = fadd <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, %a
+  ret <4 x float> %r
+}
+
+define <4 x float> @test_fadd_imm_1_v4_ftz(<4 x float> %a) #2 {
+; CHECK-LABEL: test_fadd_imm_1_v4_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<13>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [test_fadd_imm_1_v4_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    mov.b32 %r5, 0f40000000;
+; CHECK-NEXT:    mov.b32 %r6, 0f3F800000;
+; CHECK-NEXT:    mov.b64 %rd3, {%r6, %r5};
+; CHECK-NEXT:    add.rn.ftz.f32x2 %rd4, %rd1, %rd3;
+; CHECK-NEXT:    mov.b32 %r7, 0f40800000;
+; CHECK-NEXT:    mov.b32 %r8, 0f40400000;
+; CHECK-NEXT:    mov.b64 %rd5, {%r8, %r7};
+; CHECK-NEXT:    add.rn.ftz.f32x2 %rd6, %rd2, %rd5;
+; CHECK-NEXT:    mov.b64 {%r9, %r10}, %rd6;
+; CHECK-NEXT:    mov.b64 {%r11, %r12}, %rd4;
+; CHECK-NEXT:    st.param.v4.b32 [func_retval0], {%r11, %r12, %r9, %r10};
+; CHECK-NEXT:    ret;
+  %r = fadd <4 x float> %a, <float 1.0, float 2.0, float 3.0, float 4.0>
+  ret <4 x float> %r
+}
+
+define <2 x float> @test_fsub_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-LABEL: test_fsub_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fsub_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fsub_ftz_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    sub.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fsub <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fneg_ftz(<2 x float> %a) #2 {
+; CHECK-LABEL: test_fneg_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fneg_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    neg.ftz.f32 %r3, %r2;
+; CHECK-NEXT:    neg.ftz.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = fneg <2 x float> %a
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fmul_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-LABEL: test_fmul_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fmul_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fmul_ftz_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    mul.rn.ftz.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = fmul <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fma_ftz(<2 x float> %a, <2 x float> %b, <2 x float> %c) #2 {
+; CHECK-LABEL: test_fma_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fma_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fma_ftz_param_2];
+; CHECK-NEXT:    mov.b64 %rd3, {%r3, %r4};
+; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_fma_ftz_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r5, %r6};
+; CHECK-NEXT:    fma.rn.ftz.f32x2 %rd4, %rd1, %rd2, %rd3;
+; CHECK-NEXT:    mov.b64 {%r7, %r8}, %rd4;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r7, %r8};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fdiv_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-LABEL: test_fdiv_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fdiv_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fdiv_ftz_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    div.rn.ftz.f32 %r5, %r2, %r4;
+; CHECK-NEXT:    div.rn.ftz.f32 %r6, %r1, %r3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NEXT:    ret;
+  %r = fdiv <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_frem_ftz(<2 x float> %a, <2 x float> %b) #2 {
+; CHECK-LABEL: test_frem_ftz(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b32 %r<15>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_frem_ftz_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_frem_ftz_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    div.rn.ftz.f32 %r5, %r2, %r4;
+; CHECK-NEXT:    cvt.rzi.ftz.f32.f32 %r6, %r5;
+; CHECK-NEXT:    neg.ftz.f32 %r7, %r6;
+; CHECK-NEXT:    fma.rn.ftz.f32 %r8, %r7, %r4, %r2;
+; CHECK-NEXT:    testp.infinite.f32 %p1, %r4;
+; CHECK-NEXT:    selp.f32 %r9, %r2, %r8, %p1;
+; CHECK-NEXT:    div.rn.ftz.f32 %r10, %r1, %r3;
+; CHECK-NEXT:    cvt.rzi.ftz.f32.f32 %r11, %r10;
+; CHECK-NEXT:    neg.ftz.f32 %r12, %r11;
+; CHECK-NEXT:    fma.rn.ftz.f32 %r13, %r12, %r3, %r1;
+; CHECK-NEXT:    testp.infinite.f32 %p2, %r3;
+; CHECK-NEXT:    selp.f32 %r14, %r1, %r13, %p2;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r14, %r9};
+; CHECK-NEXT:    ret;
+  %r = frem <2 x float> %a, %b
+  ret <2 x float> %r
+}
+
+define void @test_ldst_v2f32(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: test_ldst_v2f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd2, [test_ldst_v2f32_param_1];
+; CHECK-NEXT:    ld.param.b64 %rd1, [test_ldst_v2f32_param_0];
+; CHECK-NEXT:    ld.v2.b32 {%r1, %r2}, [%rd1];
+; CHECK-NEXT:    st.v2.b32 [%rd2], {%r1, %r2};
+; CHECK-NEXT:    ret;
+  %t1 = load <2 x float>, ptr %a
+  store <2 x float> %t1, ptr %b, align 32
+  ret void
+}
+
+define void @test_ldst_v3f32(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: test_ldst_v3f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd2, [test_ldst_v3f32_param_1];
+; CHECK-NEXT:    ld.param.b64 %rd1, [test_ldst_v3f32_param_0];
+; CHECK-NEXT:    ld.b64 %rd3, [%rd1];
+; CHECK-NEXT:    ld.b32 %r1, [%rd1+8];
+; CHECK-NEXT:    st.b32 [%rd2+8], %r1;
+; CHECK-NEXT:    st.b64 [%rd2], %rd3;
+; CHECK-NEXT:    ret;
+  %t1 = load <3 x float>, ptr %a
+  store <3 x float> %t1, ptr %b, align 32
+  ret void
+}
+
+define void @test_ldst_v4f32(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: test_ldst_v4f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd2, [test_ldst_v4f32_param_1];
+; CHECK-NEXT:    ld.param.b64 %rd1, [test_ldst_v4f32_param_0];
+; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; CHECK-NEXT:    st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; CHECK-NEXT:    ret;
+  %t1 = load <4 x float>, ptr %a
+  store <4 x float> %t1, ptr %b, align 32
+  ret void
+}
+
+define void @test_ldst_v8f32(ptr %a, ptr %b) #0 {
+; CHECK-LABEL: test_ldst_v8f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd2, [test_ldst_v8f32_param_1];
+; CHECK-NEXT:    ld.param.b64 %rd1, [test_ldst_v8f32_param_0];
+; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; CHECK-NEXT:    ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
+; CHECK-NEXT:    st.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
+; CHECK-NEXT:    st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; CHECK-NEXT:    ret;
+  %t1 = load <8 x float>, ptr %a
+  store <8 x float> %t1, ptr %b, align 32
+  ret void
+}
+
+declare <2 x float> @test_callee(<2 x float> %a, <2 x float> %b) #0
+
+define <2 x float> @test_call(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_call(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_call_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_call_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    { // callseq 0, 0
+; CHECK-NEXT:    .param .align 8 .b8 param0[8];
+; CHECK-NEXT:    st.param.b64 [param0], %rd1;
+; CHECK-NEXT:    .param .align 8 .b8 param1[8];
+; CHECK-NEXT:    st.param.b64 [param1], %rd2;
+; CHECK-NEXT:    .param .align 8 .b8 retval0[8];
+; CHECK-NEXT:    call.uni (retval0),
+; CHECK-NEXT:    test_callee,
+; CHECK-NEXT:    (
+; CHECK-NEXT:    param0,
+; CHECK-NEXT:    param1
+; CHECK-NEXT:    );
+; CHECK-NEXT:    ld.param.b64 %rd3, [retval0];
+; CHECK-NEXT:    } // callseq 0
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @test_callee(<2 x float> %a, <2 x float> %b)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_call_flipped(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_call_flipped(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_call_flipped_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_call_flipped_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    { // callseq 1, 0
+; CHECK-NEXT:    .param .align 8 .b8 param0[8];
+; CHECK-NEXT:    st.param.b64 [param0], %rd2;
+; CHECK-NEXT:    .param .align 8 .b8 param1[8];
+; CHECK-NEXT:    st.param.b64 [param1], %rd1;
+; CHECK-NEXT:    .param .align 8 .b8 retval0[8];
+; CHECK-NEXT:    call.uni (retval0),
+; CHECK-NEXT:    test_callee,
+; CHECK-NEXT:    (
+; CHECK-NEXT:    param0,
+; CHECK-NEXT:    param1
+; CHECK-NEXT:    );
+; CHECK-NEXT:    ld.param.b64 %rd3, [retval0];
+; CHECK-NEXT:    } // callseq 1
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_tailcall_flipped(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_tailcall_flipped(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_tailcall_flipped_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_tailcall_flipped_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    { // callseq 2, 0
+; CHECK-NEXT:    .param .align 8 .b8 param0[8];
+; CHECK-NEXT:    st.param.b64 [param0], %rd2;
+; CHECK-NEXT:    .param .align 8 .b8 param1[8];
+; CHECK-NEXT:    st.param.b64 [param1], %rd1;
+; CHECK-NEXT:    .param .align 8 .b8 retval0[8];
+; CHECK-NEXT:    call.uni (retval0),
+; CHECK-NEXT:    test_callee,
+; CHECK-NEXT:    (
+; CHECK-NEXT:    param0,
+; CHECK-NEXT:    param1
+; CHECK-NEXT:    );
+; CHECK-NEXT:    ld.param.b64 %rd3, [retval0];
+; CHECK-NEXT:    } // callseq 2
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = tail call <2 x float> @test_callee(<2 x float> %b, <2 x float> %a)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_select(<2 x float> %a, <2 x float> %b, i1 zeroext %c) #0 {
+; CHECK-LABEL: test_select(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<2>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_select_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.b8 %rs1, [test_select_param_2];
+; CHECK-NEXT:    and.b16 %rs2, %rs1, 1;
+; CHECK-NEXT:    setp.ne.b16 %p1, %rs2, 0;
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    selp.b64 %rd3, %rd1, %rd2, %p1;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %r = select i1 %c, <2 x float> %a, <2 x float> %b
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_select_cc(<2 x float> %a, <2 x float> %b, <2 x float> %c, <2 x float> %d) #0 {
+; CHECK-LABEL: test_select_cc(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b32 %r<11>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_select_cc_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_param_3];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_select_cc_param_2];
+; CHECK-NEXT:    mov.b64 %rd3, {%r5, %r6};
+; CHECK-NEXT:    ld.param.v2.b32 {%r7, %r8}, [test_select_cc_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    setp.neu.f32 %p1, %r5, %r3;
+; CHECK-NEXT:    setp.neu.f32 %p2, %r6, %r4;
+; CHECK-NEXT:    selp.f32 %r9, %r2, %r8, %p2;
+; CHECK-NEXT:    selp.f32 %r10, %r1, %r7, %p1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r10, %r9};
+; CHECK-NEXT:    ret;
+  %cc = fcmp une <2 x float> %c, %d
+  %r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
+  ret <2 x float> %r
+}
+
+define <2 x double> @test_select_cc_f64_f32(<2 x double> %a, <2 x double> %b, <2 x float> %c, <2 x float> %d) #0 {
+; CHECK-LABEL: test_select_cc_f64_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<9>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [test_select_cc_f64_f32_param_1];
+; CHECK-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [test_select_cc_f64_f32_param_0];
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_select_cc_f64_f32_param_3];
+; CHECK-NEXT:    mov.b64 %rd6, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f64_f32_param_2];
+; CHECK-NEXT:    mov.b64 %rd5, {%r3, %r4};
+; CHECK-NEXT:    setp.neu.f32 %p1, %r3, %r1;
+; CHECK-NEXT:    setp.neu.f32 %p2, %r4, %r2;
+; CHECK-NEXT:    selp.f64 %rd7, %rd2, %rd4, %p2;
+; CHECK-NEXT:    selp.f64 %rd8, %rd1, %rd3, %p1;
+; CHECK-NEXT:    st.param.v2.b64 [func_retval0], {%rd8, %rd7};
+; CHECK-NEXT:    ret;
+  %cc = fcmp une <2 x float> %c, %d
+  %r = select <2 x i1> %cc, <2 x double> %a, <2 x double> %b
+  ret <2 x double> %r
+}
+
+define <2 x float> @test_select_cc_f32_f64(<2 x float> %a, <2 x float> %b, <2 x double> %c, <2 x double> %d) #0 {
+; CHECK-LABEL: test_select_cc_f32_f64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_select_cc_f32_f64_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b64 {%rd5, %rd6}, [test_select_cc_f32_f64_param_3];
+; CHECK-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [test_select_cc_f32_f64_param_2];
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_select_cc_f32_f64_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.neu.f64 %p1, %rd3, %rd5;
+; CHECK-NEXT:    setp.neu.f64 %p2, %rd4, %rd6;
+; CHECK-NEXT:    selp.f32 %r5, %r2, %r4, %p2;
+; CHECK-NEXT:    selp.f32 %r6, %r1, %r3, %p1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NEXT:    ret;
+  %cc = fcmp une <2 x double> %c, %d
+  %r = select <2 x i1> %cc, <2 x float> %a, <2 x float> %b
+  ret <2 x float> %r
+}
+
+define <2 x i1> @test_fcmp_une(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_une(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_une_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_une_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.neu.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.neu.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp une <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ueq(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_ueq(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_ueq_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_ueq_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.equ.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.equ.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp ueq <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ugt(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_ugt(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_ugt_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_ugt_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.gtu.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.gtu.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp ugt <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_uge(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_uge(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_uge_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_uge_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.geu.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.geu.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp uge <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ult(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_ult(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_ult_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_ult_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.ltu.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.ltu.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp ult <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ule(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_ule(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_ule_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_ule_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.leu.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.leu.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp ule <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_uno(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_uno(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_uno_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_uno_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.nan.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.nan.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp uno <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_one(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_one(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_one_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_one_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.ne.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.ne.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp one <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_oeq(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_oeq(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_oeq_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_oeq_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.eq.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.eq.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp oeq <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ogt(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_ogt(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_ogt_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_ogt_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.gt.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.gt.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp ogt <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_oge(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_oge(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_oge_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_oge_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.ge.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.ge.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp oge <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_olt(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_olt(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_olt_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_olt_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.lt.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.lt.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp olt <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ole(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_ole(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_ole_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_ole_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.le.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.le.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp ole <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i1> @test_fcmp_ord(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_fcmp_ord(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fcmp_ord_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fcmp_ord_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    setp.num.f32 %p1, %r2, %r4;
+; CHECK-NEXT:    setp.num.f32 %p2, %r1, %r3;
+; CHECK-NEXT:    selp.b16 %rs1, -1, 0, %p2;
+; CHECK-NEXT:    st.param.b8 [func_retval0], %rs1;
+; CHECK-NEXT:    selp.b16 %rs2, -1, 0, %p1;
+; CHECK-NEXT:    st.param.b8 [func_retval0+1], %rs2;
+; CHECK-NEXT:    ret;
+  %r = fcmp ord <2 x float> %a, %b
+  ret <2 x i1> %r
+}
+
+define <2 x i32> @test_fptosi_i32(<2 x float> %a) #0 {
+; CHECK-LABEL: test_fptosi_i32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fptosi_i32_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rzi.s32.f32 %r3, %r2;
+; CHECK-NEXT:    cvt.rzi.s32.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = fptosi <2 x float> %a to <2 x i32>
+  ret <2 x i32> %r
+}
+
+define <2 x i64> @test_fptosi_i64(<2 x float> %a) #0 {
+; CHECK-LABEL: test_fptosi_i64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fptosi_i64_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rzi.s64.f32 %rd2, %r2;
+; CHECK-NEXT:    cvt.rzi.s64.f32 %rd3, %r1;
+; CHECK-NEXT:    st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-NEXT:    ret;
+  %r = fptosi <2 x float> %a to <2 x i64>
+  ret <2 x i64> %r
+}
+
+define <2 x i32> @test_fptoui_2xi32(<2 x float> %a) #0 {
+; CHECK-LABEL: test_fptoui_2xi32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fptoui_2xi32_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rzi.u32.f32 %r3, %r2;
+; CHECK-NEXT:    cvt.rzi.u32.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = fptoui <2 x float> %a to <2 x i32>
+  ret <2 x i32> %r
+}
+
+define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 {
+; CHECK-LABEL: test_fptoui_2xi64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fptoui_2xi64_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rzi.u64.f32 %rd2, %r2;
+; CHECK-NEXT:    cvt.rzi.u64.f32 %rd3, %r1;
+; CHECK-NEXT:    st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-NEXT:    ret;
+  %r = fptoui <2 x float> %a to <2 x i64>
+  ret <2 x i64> %r
+}
+
+define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 {
+; CHECK-LABEL: test_uitofp_2xi32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
+; CHECK-NEXT:    cvt.rn.f32.u32 %r3, %r2;
+; CHECK-NEXT:    cvt.rn.f32.u32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = uitofp <2 x i32> %a to <2 x float>
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 {
+; CHECK-LABEL: test_uitofp_2xi64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [test_uitofp_2xi64_param_0];
+; CHECK-NEXT:    cvt.rn.f32.u64 %r1, %rd2;
+; CHECK-NEXT:    cvt.rn.f32.u64 %r2, %rd1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT:    ret;
+  %r = uitofp <2 x i64> %a to <2 x float>
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 {
+; CHECK-LABEL: test_sitofp_2xi32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
+; CHECK-NEXT:    cvt.rn.f32.s32 %r3, %r2;
+; CHECK-NEXT:    cvt.rn.f32.s32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = sitofp <2 x i32> %a to <2 x float>
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_sitofp_2xi64(<2 x i64> %a) #0 {
+; CHECK-LABEL: test_sitofp_2xi64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [test_sitofp_2xi64_param_0];
+; CHECK-NEXT:    cvt.rn.f32.s64 %r1, %rd2;
+; CHECK-NEXT:    cvt.rn.f32.s64 %r2, %rd1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT:    ret;
+  %r = sitofp <2 x i64> %a to <2 x float>
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_uitofp_2xi32_fadd(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_uitofp_2xi32_fadd_param_1];
+; CHECK-NEXT:    mov.b64 %rd1, {%r3, %r4};
+; CHECK-NEXT:    cvt.rn.f32.u32 %r5, %r2;
+; CHECK-NEXT:    cvt.rn.f32.u32 %r6, %r1;
+; CHECK-NEXT:    mov.b64 %rd2, {%r6, %r5};
+; CHECK-NEXT:    add.rn.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    mov.b64 {%r7, %r8}, %rd3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r7, %r8};
+; CHECK-NEXT:    ret;
+  %c = uitofp <2 x i32> %a to <2 x float>
+  %r = fadd <2 x float> %b, %c
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fptrunc_2xdouble(<2 x double> %a) #0 {
+; CHECK-LABEL: test_fptrunc_2xdouble(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [test_fptrunc_2xdouble_param_0];
+; CHECK-NEXT:    cvt.rn.f32.f64 %r1, %rd2;
+; CHECK-NEXT:    cvt.rn.f32.f64 %r2, %rd1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT:    ret;
+  %r = fptrunc <2 x double> %a to <2 x float>
+  ret <2 x float> %r
+}
+
+define <2 x double> @test_fpext_2xdouble(<2 x float> %a) #0 {
+; CHECK-LABEL: test_fpext_2xdouble(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fpext_2xdouble_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.f64.f32 %rd2, %r2;
+; CHECK-NEXT:    cvt.f64.f32 %rd3, %r1;
+; CHECK-NEXT:    st.param.v2.b64 [func_retval0], {%rd3, %rd2};
+; CHECK-NEXT:    ret;
+  %r = fpext <2 x float> %a to <2 x double>
+  ret <2 x double> %r
+}
+
+define <2 x i32> @test_bitcast_2xfloat_to_2xi32(<2 x float> %a) #0 {
+; CHECK-LABEL: test_bitcast_2xfloat_to_2xi32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xfloat_to_2xi32_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    mov.b64 {_, %r3}, %rd1;
+; CHECK-NEXT:    cvt.u32.u64 %r4, %rd1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = bitcast <2 x float> %a to <2 x i32>
+  ret <2 x i32> %r
+}
+
+define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 {
+; CHECK-LABEL: test_bitcast_2xi32_to_2xfloat(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0];
+; CHECK-NEXT:    cvt.u64.u32 %rd1, %r1;
+; CHECK-NEXT:    cvt.u64.u32 %rd2, %r2;
+; CHECK-NEXT:    shl.b64 %rd3, %rd2, 32;
+; CHECK-NEXT:    or.b64 %rd4, %rd1, %rd3;
+; CHECK-NEXT:    mov.b64 {%r3, %r4}, %rd4;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r3, %r4};
+; CHECK-NEXT:    ret;
+  %r = bitcast <2 x i32> %a to <2 x float>
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_bitcast_double_to_2xfloat(double %a) #0 {
+; CHECK-LABEL: test_bitcast_double_to_2xfloat(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [test_bitcast_double_to_2xfloat_param_0];
+; CHECK-NEXT:    mov.b64 {%r1, %r2}, %rd1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r1, %r2};
+; CHECK-NEXT:    ret;
+  %r = bitcast double %a to <2 x float>
+  ret <2 x float> %r
+}
+
+define double @test_bitcast_2xfloat_to_double(<2 x float> %a) #0 {
+; CHECK-LABEL: test_bitcast_2xfloat_to_double(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xfloat_to_double_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    st.param.b64 [func_retval0], %rd1;
+; CHECK-NEXT:    ret;
+  %r = bitcast <2 x float> %a to double
+  ret double %r
+}
+
+define <2 x float> @test_sqrt(<2 x float> %a) #0 {
+; CHECK-LABEL: test_sqrt(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_sqrt_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    sqrt.rn.f32 %r3, %r2;
+; CHECK-NEXT:    sqrt.rn.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.sqrt(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_powi(
+;define <2 x float> @test_powi(<2 x float> %a, <2 x i32> %b) #0 {
+;  %r = call <2 x float> @llvm.powi.i32(<2 x float> %a, <2 x i32> %b)
+;  ret <2 x float> %r
+;}
+
+define <2 x float> @test_sin(<2 x float> %a) #0 #1 {
+; CHECK-LABEL: test_sin(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_sin_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    sin.approx.f32 %r3, %r2;
+; CHECK-NEXT:    sin.approx.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.sin(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_cos(<2 x float> %a) #0 #1 {
+; CHECK-LABEL: test_cos(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_cos_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cos.approx.f32 %r3, %r2;
+; CHECK-NEXT:    cos.approx.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.cos(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_pow(
+;define <2 x float> @test_pow(<2 x float> %a, <2 x float> %b) #0 {
+;  %r = call <2 x float> @llvm.pow(<2 x float> %a, <2 x float> %b)
+;  ret <2 x float> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp(
+;define <2 x float> @test_exp(<2 x float> %a) #0 {
+;  %r = call <2 x float> @llvm.exp(<2 x float> %a)
+;  ret <2 x float> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_exp2(
+;define <2 x float> @test_exp2(<2 x float> %a) #0 {
+;  %r = call <2 x float> @llvm.exp2(<2 x float> %a)
+;  ret <2 x float> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log(
+;define <2 x float> @test_log(<2 x float> %a) #0 {
+;  %r = call <2 x float> @llvm.log(<2 x float> %a)
+;  ret <2 x float> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log10(
+;define <2 x float> @test_log10(<2 x float> %a) #0 {
+;  %r = call <2 x float> @llvm.log10(<2 x float> %a)
+;  ret <2 x float> %r
+;}
+
+;;; Can't do this yet: requires libcall.
+; XCHECK-LABEL: test_log2(
+;define <2 x float> @test_log2(<2 x float> %a) #0 {
+;  %r = call <2 x float> @llvm.log2(<2 x float> %a)
+;  ret <2 x float> %r
+;}
+
+
+define <2 x float> @test_fma(<2 x float> %a, <2 x float> %b, <2 x float> %c) #0 {
+; CHECK-LABEL: test_fma(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fma_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fma_param_2];
+; CHECK-NEXT:    mov.b64 %rd3, {%r3, %r4};
+; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_fma_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r5, %r6};
+; CHECK-NEXT:    fma.rn.f32x2 %rd4, %rd1, %rd2, %rd3;
+; CHECK-NEXT:    mov.b64 {%r7, %r8}, %rd4;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r7, %r8};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.fma(<2 x float> %a, <2 x float> %b, <2 x float> %c)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_fabs(<2 x float> %a) #0 {
+; CHECK-LABEL: test_fabs(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fabs_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    abs.f32 %r3, %r2;
+; CHECK-NEXT:    abs.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.fabs(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_minnum(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_minnum(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_minnum_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_minnum_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    min.f32 %r5, %r2, %r4;
+; CHECK-NEXT:    min.f32 %r6, %r1, %r3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.minnum(<2 x float> %a, <2 x float> %b)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_maxnum(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_maxnum(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_maxnum_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_maxnum_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    max.f32 %r5, %r2, %r4;
+; CHECK-NEXT:    max.f32 %r6, %r1, %r3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.maxnum(<2 x float> %a, <2 x float> %b)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_copysign(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_copysign(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_copysign_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_copysign_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    copysign.f32 %r5, %r4, %r2;
+; CHECK-NEXT:    copysign.f32 %r6, %r3, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r6, %r5};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.copysign(<2 x float> %a, <2 x float> %b)
+  ret <2 x float> %r
+}
+
+define <2 x float> @test_copysign_f64(<2 x float> %a, <2 x double> %b) #0 {
+; CHECK-LABEL: test_copysign_f64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<3>;
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-NEXT:    .reg .b64 %rd<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_copysign_f64_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b64 {%rd2, %rd3}, [test_copysign_f64_param_1];
+; CHECK-NEXT:    abs.f32 %r3, %r2;
+; CHECK-NEXT:    neg.f32 %r4, %r3;
+; CHECK-NEXT:    shr.u64 %rd4, %rd3, 63;
+; CHECK-NEXT:    and.b64 %rd5, %rd4, 1;
+; CHECK-NEXT:    setp.ne.b64 %p1, %rd5, 0;
+; CHECK-NEXT:    selp.f32 %r5, %r4, %r3, %p1;
+; CHECK-NEXT:    abs.f32 %r6, %r1;
+; CHECK-NEXT:    neg.f32 %r7, %r6;
+; CHECK-NEXT:    shr.u64 %rd6, %rd2, 63;
+; CHECK-NEXT:    and.b64 %rd7, %rd6, 1;
+; CHECK-NEXT:    setp.ne.b64 %p2, %rd7, 0;
+; CHECK-NEXT:    selp.f32 %r8, %r7, %r6, %p2;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r8, %r5};
+; CHECK-NEXT:    ret;
+  %tb = fptrunc <2 x double> %b to <2 x float>
+  %r = call <2 x float> @llvm.copysign(<2 x float> %a, <2 x float> %tb)
+  ret <2 x float> %r
+}
+
+; copysign on <2 x float> followed by fpext to <2 x double>: the copysign is
+; scalarized to two copysign.f32 ops, then each lane is widened with
+; cvt.f64.f32 before the v2.b64 return store.
+define <2 x double> @test_copysign_extended(<2 x float> %a, <2 x float> %b) #0 {
+; CHECK-LABEL: test_copysign_extended(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_copysign_extended_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_copysign_extended_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    copysign.f32 %r5, %r3, %r1;
+; CHECK-NEXT:    copysign.f32 %r6, %r4, %r2;
+; CHECK-NEXT:    cvt.f64.f32 %rd3, %r6;
+; CHECK-NEXT:    cvt.f64.f32 %rd4, %r5;
+; CHECK-NEXT:    st.param.v2.b64 [func_retval0], {%rd4, %rd3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.copysign(<2 x float> %a, <2 x float> %b)
+  %xr = fpext <2 x float> %r to <2 x double>
+  ret <2 x double> %xr
+}
+
+; floor on <2 x float> is scalarized to two cvt.rmi.f32.f32 (round toward -inf).
+define <2 x float> @test_floor(<2 x float> %a) #0 {
+; CHECK-LABEL: test_floor(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_floor_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rmi.f32.f32 %r3, %r2;
+; CHECK-NEXT:    cvt.rmi.f32.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.floor(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+; ceil on <2 x float> is scalarized to two cvt.rpi.f32.f32 (round toward +inf).
+define <2 x float> @test_ceil(<2 x float> %a) #0 {
+; CHECK-LABEL: test_ceil(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_ceil_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rpi.f32.f32 %r3, %r2;
+; CHECK-NEXT:    cvt.rpi.f32.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.ceil(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+; trunc on <2 x float> is scalarized to two cvt.rzi.f32.f32 (round toward zero).
+define <2 x float> @test_trunc(<2 x float> %a) #0 {
+; CHECK-LABEL: test_trunc(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_trunc_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rzi.f32.f32 %r3, %r2;
+; CHECK-NEXT:    cvt.rzi.f32.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.trunc(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+; rint on <2 x float> is scalarized to two cvt.rni.f32.f32 (round to nearest
+; even integer).
+define <2 x float> @test_rint(<2 x float> %a) #0 {
+; CHECK-LABEL: test_rint(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_rint_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rni.f32.f32 %r3, %r2;
+; CHECK-NEXT:    cvt.rni.f32.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.rint(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+; nearbyint lowers the same way as rint here: two cvt.rni.f32.f32 per lane.
+define <2 x float> @test_nearbyint(<2 x float> %a) #0 {
+; CHECK-LABEL: test_nearbyint(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_nearbyint_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rni.f32.f32 %r3, %r2;
+; CHECK-NEXT:    cvt.rni.f32.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.nearbyint(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+; roundeven maps directly onto cvt.rni.f32.f32 (round to nearest even),
+; scalarized per lane.
+define <2 x float> @test_roundeven(<2 x float> %a) #0 {
+; CHECK-LABEL: test_roundeven(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_roundeven_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    cvt.rni.f32.f32 %r3, %r2;
+; CHECK-NEXT:    cvt.rni.f32.f32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.roundeven(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+; check the use of sign mask and 0.5 to implement round: per lane, a
+; sign-matched 0.5 (sign bits masked in, then or'ed with 0x3F000000) is added
+; and the sum truncated with cvt.rzi. Two selp fixups follow: inputs with
+; |x| > 0f4B000000 (2^23) are already integral and pass through unchanged, and
+; inputs with |x| < 0f3F000000 (0.5) take the plain-trunc result instead.
+define <2 x float> @test_round(<2 x float> %a) #0 {
+; CHECK-LABEL: test_round(
+; CHECK:       {
+; CHECK-NEXT:    .reg .pred %p<5>;
+; CHECK-NEXT:    .reg .b32 %r<19>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_round_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    and.b32 %r3, %r2, -2147483648;
+; CHECK-NEXT:    or.b32 %r4, %r3, 1056964608;
+; CHECK-NEXT:    add.rn.f32 %r5, %r2, %r4;
+; CHECK-NEXT:    cvt.rzi.f32.f32 %r6, %r5;
+; CHECK-NEXT:    abs.f32 %r7, %r2;
+; CHECK-NEXT:    setp.gt.f32 %p1, %r7, 0f4B000000;
+; CHECK-NEXT:    selp.f32 %r8, %r2, %r6, %p1;
+; CHECK-NEXT:    cvt.rzi.f32.f32 %r9, %r2;
+; CHECK-NEXT:    setp.lt.f32 %p2, %r7, 0f3F000000;
+; CHECK-NEXT:    selp.f32 %r10, %r9, %r8, %p2;
+; CHECK-NEXT:    and.b32 %r11, %r1, -2147483648;
+; CHECK-NEXT:    or.b32 %r12, %r11, 1056964608;
+; CHECK-NEXT:    add.rn.f32 %r13, %r1, %r12;
+; CHECK-NEXT:    cvt.rzi.f32.f32 %r14, %r13;
+; CHECK-NEXT:    abs.f32 %r15, %r1;
+; CHECK-NEXT:    setp.gt.f32 %p3, %r15, 0f4B000000;
+; CHECK-NEXT:    selp.f32 %r16, %r1, %r14, %p3;
+; CHECK-NEXT:    cvt.rzi.f32.f32 %r17, %r1;
+; CHECK-NEXT:    setp.lt.f32 %p4, %r15, 0f3F000000;
+; CHECK-NEXT:    selp.f32 %r18, %r17, %r16, %p4;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r18, %r10};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.round(<2 x float> %a)
+  ret <2 x float> %r
+}
+
+; fmuladd on <2 x float> selects the packed fma.rn.f32x2 instruction operating
+; on b64 registers, rather than two scalar fma.rn.f32 ops.
+define <2 x float> @test_fmuladd(<2 x float> %a, <2 x float> %b, <2 x float> %c) #0 {
+; CHECK-LABEL: test_fmuladd(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fmuladd_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [test_fmuladd_param_2];
+; CHECK-NEXT:    mov.b64 %rd3, {%r3, %r4};
+; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [test_fmuladd_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r5, %r6};
+; CHECK-NEXT:    fma.rn.f32x2 %rd4, %rd1, %rd2, %rd3;
+; CHECK-NEXT:    mov.b64 {%r7, %r8}, %rd4;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r7, %r8};
+; CHECK-NEXT:    ret;
+  %r = call <2 x float> @llvm.fmuladd(<2 x float> %a, <2 x float> %b, <2 x float> %c)
+  ret <2 x float> %r
+}
+
+; a lane swap needs no ALU instruction: the two 32-bit halves are simply
+; stored to the return value in reversed order.
+define <2 x float> @test_shufflevector(<2 x float> %a) #0 {
+; CHECK-LABEL: test_shufflevector(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<3>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_shufflevector_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT:    ret;
+  %s = shufflevector <2 x float> %a, <2 x float> undef, <2 x i32> <i32 1, i32 0>
+  ret <2 x float> %s
+}
+
+; inserting a scalar into lane 1 needs no packing instruction: lane 0 of %a
+; and the new scalar are stored directly as the return pair.
+define <2 x float> @test_insertelement(<2 x float> %a, float %x) #0 {
+; CHECK-LABEL: test_insertelement(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<4>;
+; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r2, %r3}, [test_insertelement_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r2, %r3};
+; CHECK-NEXT:    ld.param.b32 %r1, [test_insertelement_param_1];
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r2, %r1};
+; CHECK-NEXT:    ret;
+  %i = insertelement <2 x float> %a, float %x, i64 1
+  ret <2 x float> %i
+}
+
+; signed i32 -> f32 conversion is scalarized to cvt.rn.f32.s32 per lane;
+; no b64 packing is needed since the source is an integer vector.
+define <2 x float> @test_sitofp_2xi32_to_2xfloat(<2 x i32> %a) #0 {
+; CHECK-LABEL: test_sitofp_2xi32_to_2xfloat(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_to_2xfloat_param_0];
+; CHECK-NEXT:    cvt.rn.f32.s32 %r3, %r2;
+; CHECK-NEXT:    cvt.rn.f32.s32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = sitofp <2 x i32> %a to <2 x float>
+  ret <2 x float> %r
+}
+
+; unsigned i32 -> f32 conversion is scalarized to cvt.rn.f32.u32 per lane.
+define <2 x float> @test_uitofp_2xi32_to_2xfloat(<2 x i32> %a) #0 {
+; CHECK-LABEL: test_uitofp_2xi32_to_2xfloat(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_to_2xfloat_param_0];
+; CHECK-NEXT:    cvt.rn.f32.u32 %r3, %r2;
+; CHECK-NEXT:    cvt.rn.f32.u32 %r4, %r1;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NEXT:    ret;
+  %r = uitofp <2 x i32> %a to <2 x float>
+  ret <2 x float> %r
+}
+
+attributes #0 = { nounwind }
+attributes #1 = { "unsafe-fp-math" = "true" }
+attributes #2 = { "denormal-fp-math"="preserve-sign" }
diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll b/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
index 9c11f169a89df..dff8483af5237 100644
--- a/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
@@ -502,10 +502,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans(<2 x half> %a, <2 x half> %b, <2 x
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -514,10 +514,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans(<2 x half> %a, <2 x half> %b, <2 x
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -528,10 +528,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans(<2 x half> %a, <2 x half> %b, <2 x
 ; CHECK-SM70-NEXT:    .reg .b32 %r<7>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 %r5, 0;
 ; CHECK-SM70-NEXT:    setp.gt.f16x2 %p1|%p2, %r4, %r5;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
@@ -554,10 +554,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans_multiple_uses_of_fma(<2 x half> %a
 ; CHECK-NEXT:    .reg .b32 %r<10>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 0;
 ; CHECK-NEXT:    max.f16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    mov.b32 %r7, 1191200512;
@@ -571,10 +571,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans_multiple_uses_of_fma(<2 x half> %a
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<10>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 0;
 ; CHECK-FTZ-NEXT:    max.ftz.f16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    mov.b32 %r7, 1191200512;
@@ -590,10 +590,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans_multiple_uses_of_fma(<2 x half> %a
 ; CHECK-SM70-NEXT:    .reg .b32 %r<10>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 %r5, 0;
 ; CHECK-SM70-NEXT:    setp.gt.f16x2 %p1|%p2, %r4, %r5;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
@@ -620,10 +620,10 @@ define <2 x half> @fma_f16x2_expanded_unsafe_with_nans(<2 x half> %a, <2 x half>
 ; CHECK-NEXT:    .reg .b32 %r<7>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_unsafe_with_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_unsafe_with_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_unsafe_with_nans_param_0];
-; CHECK-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 0;
 ; CHECK-NEXT:    max.f16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r6;
@@ -634,10 +634,10 @@ define <2 x half> @fma_f16x2_expanded_unsafe_with_nans(<2 x half> %a, <2 x half>
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<7>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_unsafe_with_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_unsafe_with_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_unsafe_with_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 0;
 ; CHECK-FTZ-NEXT:    max.ftz.f16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r6;
@@ -650,10 +650,10 @@ define <2 x half> @fma_f16x2_expanded_unsafe_with_nans(<2 x half> %a, <2 x half>
 ; CHECK-SM70-NEXT:    .reg .b32 %r<7>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_unsafe_with_nans_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_unsafe_with_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_unsafe_with_nans_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 %r5, 0;
 ; CHECK-SM70-NEXT:    setp.gt.f16x2 %p1|%p2, %r4, %r5;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
@@ -675,10 +675,10 @@ define <2 x half> @fma_f16x2_expanded_maxnum_no_nans(<2 x half> %a, <2 x half> %
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -687,10 +687,10 @@ define <2 x half> @fma_f16x2_expanded_maxnum_no_nans(<2 x half> %a, <2 x half> %
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -700,10 +700,10 @@ define <2 x half> @fma_f16x2_expanded_maxnum_no_nans(<2 x half> %a, <2 x half> %
 ; CHECK-SM70-NEXT:    .reg .b32 %r<10>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
 ; CHECK-SM70-NEXT:    cvt.f32.f16 %r5, %rs2;
 ; CHECK-SM70-NEXT:    max.f32 %r6, %r5, 0f00000000;
@@ -726,10 +726,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_unsafe_with_nans(<2 x bfloat> %a, <2 x
 ; CHECK-NEXT:    .reg .b32 %r<7>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_unsafe_with_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_unsafe_with_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_unsafe_with_nans_param_0];
-; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 0;
 ; CHECK-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r6;
@@ -740,10 +740,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_unsafe_with_nans(<2 x bfloat> %a, <2 x
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<7>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_unsafe_with_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_unsafe_with_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_unsafe_with_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_unsafe_with_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 0;
 ; CHECK-FTZ-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r6;
@@ -812,10 +812,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> %
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -824,10 +824,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> %
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -895,10 +895,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
 ; CHECK-NEXT:    .reg .b32 %r<11>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 0;
 ; CHECK-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    mov.b32 %r7, 1065369472;
@@ -914,10 +914,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<24>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 0;
 ; CHECK-FTZ-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
@@ -1043,10 +1043,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_maxnum_no_nans(<2 x bfloat> %a, <2 x bf
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -1055,10 +1055,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_maxnum_no_nans(<2 x bfloat> %a, <2 x bf
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll b/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
index c725b797526a3..d0af56e12dbaa 100644
--- a/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
@@ -364,10 +364,10 @@ define <2 x half> @fma_f16x2_no_nans(<2 x half> %a, <2 x half> %b, <2 x half> %c
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -376,10 +376,10 @@ define <2 x half> @fma_f16x2_no_nans(<2 x half> %a, <2 x half> %b, <2 x half> %c
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -390,10 +390,10 @@ define <2 x half> @fma_f16x2_no_nans(<2 x half> %a, <2 x half> %b, <2 x half> %c
 ; CHECK-SM70-NEXT:    .reg .b32 %r<7>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 %r5, 0;
 ; CHECK-SM70-NEXT:    setp.gt.f16x2 %p1|%p2, %r4, %r5;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
@@ -415,10 +415,10 @@ define <2 x half> @fma_f16x2_no_nans_multiple_uses_of_fma(<2 x half> %a, <2 x ha
 ; CHECK-NEXT:    .reg .b32 %r<8>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 1191200512;
 ; CHECK-NEXT:    add.f16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    add.f16x2 %r7, %r6, %r4;
@@ -430,10 +430,10 @@ define <2 x half> @fma_f16x2_no_nans_multiple_uses_of_fma(<2 x half> %a, <2 x ha
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<8>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 1191200512;
 ; CHECK-FTZ-NEXT:    add.ftz.f16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    add.ftz.f16x2 %r7, %r6, %r4;
@@ -445,10 +445,10 @@ define <2 x half> @fma_f16x2_no_nans_multiple_uses_of_fma(<2 x half> %a, <2 x ha
 ; CHECK-SM70-NEXT:    .reg .b32 %r<8>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 %r5, 1191200512;
 ; CHECK-SM70-NEXT:    add.f16x2 %r6, %r4, %r5;
 ; CHECK-SM70-NEXT:    add.f16x2 %r7, %r6, %r4;
@@ -468,10 +468,10 @@ define <2 x half> @fma_f16x2_maxnum_no_nans(<2 x half> %a, <2 x half> %b, <2 x h
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_maxnum_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -480,10 +480,10 @@ define <2 x half> @fma_f16x2_maxnum_no_nans(<2 x half> %a, <2 x half> %b, <2 x h
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_maxnum_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -493,10 +493,10 @@ define <2 x half> @fma_f16x2_maxnum_no_nans(<2 x half> %a, <2 x half> %b, <2 x h
 ; CHECK-SM70-NEXT:    .reg .b32 %r<10>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_maxnum_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
 ; CHECK-SM70-NEXT:    cvt.f32.f16 %r5, %rs2;
 ; CHECK-SM70-NEXT:    max.f32 %r6, %r5, 0f00000000;
@@ -518,10 +518,10 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -530,10 +530,10 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -545,18 +545,18 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
 ; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_0];
-; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_2];
-; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r3;
-; CHECK-SM70-NEXT:    cvt.u32.u16 %r4, %rs1;
-; CHECK-SM70-NEXT:    shl.b32 %r5, %r4, 16;
-; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
+; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_param_2];
+; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-SM70-NEXT:    cvt.u32.u16 %r3, %rs1;
+; CHECK-SM70-NEXT:    shl.b32 %r4, %r3, 16;
+; CHECK-SM70-NEXT:    ld.param.b32 %r5, [fma_bf16x2_no_nans_param_1];
+; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r5;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r6, %rs3;
 ; CHECK-SM70-NEXT:    shl.b32 %r7, %r6, 16;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs5, %rs6}, %r1;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r8, %rs5;
 ; CHECK-SM70-NEXT:    shl.b32 %r9, %r8, 16;
-; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r5;
+; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r4;
 ; CHECK-SM70-NEXT:    bfe.u32 %r11, %r10, 16, 1;
 ; CHECK-SM70-NEXT:    add.s32 %r12, %r11, %r10;
 ; CHECK-SM70-NEXT:    add.s32 %r13, %r12, 32767;
@@ -600,10 +600,10 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-NEXT:    .reg .b32 %r<9>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 1065369472;
 ; CHECK-NEXT:    mov.b32 %r6, 1088438496;
 ; CHECK-NEXT:    fma.rn.bf16x2 %r7, %r4, %r5, %r6;
@@ -617,10 +617,10 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<18>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
 ; CHECK-FTZ-NEXT:    cvt.u32.u16 %r5, %rs2;
 ; CHECK-FTZ-NEXT:    shl.b32 %r6, %r5, 16;
@@ -648,18 +648,18 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
 ; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
-; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r3;
-; CHECK-SM70-NEXT:    cvt.u32.u16 %r4, %rs2;
-; CHECK-SM70-NEXT:    shl.b32 %r5, %r4, 16;
-; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
+; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-SM70-NEXT:    cvt.u32.u16 %r3, %rs2;
+; CHECK-SM70-NEXT:    shl.b32 %r4, %r3, 16;
+; CHECK-SM70-NEXT:    ld.param.b32 %r5, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1];
+; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r5;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r6, %rs4;
 ; CHECK-SM70-NEXT:    shl.b32 %r7, %r6, 16;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs5, %rs6}, %r1;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r8, %rs6;
 ; CHECK-SM70-NEXT:    shl.b32 %r9, %r8, 16;
-; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r5;
+; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r4;
 ; CHECK-SM70-NEXT:    bfe.u32 %r11, %r10, 16, 1;
 ; CHECK-SM70-NEXT:    add.s32 %r12, %r11, %r10;
 ; CHECK-SM70-NEXT:    add.s32 %r13, %r12, 32767;
@@ -728,10 +728,10 @@ define <2 x bfloat> @fma_bf16x2_maxnum_no_nans(<2 x bfloat> %a, <2 x bfloat> %b,
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_maxnum_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -740,10 +740,10 @@ define <2 x bfloat> @fma_bf16x2_maxnum_no_nans(<2 x bfloat> %a, <2 x bfloat> %b,
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_maxnum_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -755,18 +755,18 @@ define <2 x bfloat> @fma_bf16x2_maxnum_no_nans(<2 x bfloat> %a, <2 x bfloat> %b,
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
 ; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_0];
-; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_maxnum_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_2];
-; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r3;
-; CHECK-SM70-NEXT:    cvt.u32.u16 %r4, %rs1;
-; CHECK-SM70-NEXT:    shl.b32 %r5, %r4, 16;
-; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
+; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-SM70-NEXT:    cvt.u32.u16 %r3, %rs1;
+; CHECK-SM70-NEXT:    shl.b32 %r4, %r3, 16;
+; CHECK-SM70-NEXT:    ld.param.b32 %r5, [fma_bf16x2_maxnum_no_nans_param_1];
+; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r5;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r6, %rs3;
 ; CHECK-SM70-NEXT:    shl.b32 %r7, %r6, 16;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs5, %rs6}, %r1;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r8, %rs5;
 ; CHECK-SM70-NEXT:    shl.b32 %r9, %r8, 16;
-; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r5;
+; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r4;
 ; CHECK-SM70-NEXT:    bfe.u32 %r11, %r10, 16, 1;
 ; CHECK-SM70-NEXT:    add.s32 %r12, %r11, %r10;
 ; CHECK-SM70-NEXT:    add.s32 %r13, %r12, 32767;
diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll b/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll
index 6b462f8468596..e9fde441ce67f 100644
--- a/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll
@@ -393,10 +393,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans(<2 x half> %a, <2 x half> %b, <2 x
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -405,10 +405,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans(<2 x half> %a, <2 x half> %b, <2 x
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -419,10 +419,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans(<2 x half> %a, <2 x half> %b, <2 x
 ; CHECK-SM70-NEXT:    .reg .b32 %r<7>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 %r5, 0;
 ; CHECK-SM70-NEXT:    setp.gt.f16x2 %p1|%p2, %r4, %r5;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
@@ -447,10 +447,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans_multiple_uses_of_fma(<2 x half> %a
 ; CHECK-NEXT:    .reg .b32 %r<10>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 0;
 ; CHECK-NEXT:    max.f16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    mov.b32 %r7, 1191200512;
@@ -464,10 +464,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans_multiple_uses_of_fma(<2 x half> %a
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<10>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 0;
 ; CHECK-FTZ-NEXT:    max.ftz.f16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    mov.b32 %r7, 1191200512;
@@ -483,10 +483,10 @@ define <2 x half> @fma_f16x2_expanded_no_nans_multiple_uses_of_fma(<2 x half> %a
 ; CHECK-SM70-NEXT:    .reg .b32 %r<10>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 %r5, 0;
 ; CHECK-SM70-NEXT:    setp.gt.f16x2 %p1|%p2, %r4, %r5;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
@@ -513,10 +513,10 @@ define <2 x half> @fma_f16x2_expanded_maxnum_no_nans(<2 x half> %a, <2 x half> %
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -525,10 +525,10 @@ define <2 x half> @fma_f16x2_expanded_maxnum_no_nans(<2 x half> %a, <2 x half> %
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -538,10 +538,10 @@ define <2 x half> @fma_f16x2_expanded_maxnum_no_nans(<2 x half> %a, <2 x half> %
 ; CHECK-SM70-NEXT:    .reg .b32 %r<10>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
 ; CHECK-SM70-NEXT:    cvt.f32.f16 %r5, %rs2;
 ; CHECK-SM70-NEXT:    max.f32 %r6, %r5, 0f00000000;
@@ -564,10 +564,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> %
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -576,10 +576,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> %
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -647,10 +647,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
 ; CHECK-NEXT:    .reg .b32 %r<11>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 0;
 ; CHECK-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    mov.b32 %r7, 1065369472;
@@ -666,10 +666,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<24>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 0;
 ; CHECK-FTZ-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
@@ -795,10 +795,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_maxnum_no_nans(<2 x bfloat> %a, <2 x bf
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -807,10 +807,10 @@ define <2 x bfloat> @fma_bf16x2_expanded_maxnum_no_nans(<2 x bfloat> %a, <2 x bf
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_expanded_maxnum_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_maxnum_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_expanded_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -1233,10 +1233,10 @@ define <2 x half> @fma_f16x2_no_nans(<2 x half> %a, <2 x half> %b, <2 x half> %c
 ; CHECK-NEXT:    .reg .b32 %r<7>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 0;
 ; CHECK-NEXT:    max.f16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r6;
@@ -1247,10 +1247,10 @@ define <2 x half> @fma_f16x2_no_nans(<2 x half> %a, <2 x half> %b, <2 x half> %c
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<7>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 0;
 ; CHECK-FTZ-NEXT:    max.ftz.f16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r6;
@@ -1263,10 +1263,10 @@ define <2 x half> @fma_f16x2_no_nans(<2 x half> %a, <2 x half> %b, <2 x half> %c
 ; CHECK-SM70-NEXT:    .reg .b32 %r<7>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 %r5, 0;
 ; CHECK-SM70-NEXT:    setp.gt.f16x2 %p1|%p2, %r4, %r5;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
@@ -1288,10 +1288,10 @@ define <2 x half> @fma_f16x2_no_nans_multiple_uses_of_fma(<2 x half> %a, <2 x ha
 ; CHECK-NEXT:    .reg .b32 %r<8>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 1191200512;
 ; CHECK-NEXT:    add.rn.f16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    add.rn.f16x2 %r7, %r6, %r4;
@@ -1303,10 +1303,10 @@ define <2 x half> @fma_f16x2_no_nans_multiple_uses_of_fma(<2 x half> %a, <2 x ha
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<8>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 1191200512;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f16x2 %r7, %r6, %r4;
@@ -1318,10 +1318,10 @@ define <2 x half> @fma_f16x2_no_nans_multiple_uses_of_fma(<2 x half> %a, <2 x ha
 ; CHECK-SM70-NEXT:    .reg .b32 %r<8>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 %r5, 1191200512;
 ; CHECK-SM70-NEXT:    add.rn.f16x2 %r6, %r4, %r5;
 ; CHECK-SM70-NEXT:    add.rn.f16x2 %r7, %r6, %r4;
@@ -1341,10 +1341,10 @@ define <2 x half> @fma_f16x2_maxnum_no_nans(<2 x half> %a, <2 x half> %b, <2 x h
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_f16x2_maxnum_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -1353,10 +1353,10 @@ define <2 x half> @fma_f16x2_maxnum_no_nans(<2 x half> %a, <2 x half> %b, <2 x h
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_f16x2_maxnum_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.ftz.relu.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -1366,10 +1366,10 @@ define <2 x half> @fma_f16x2_maxnum_no_nans(<2 x half> %a, <2 x half> %b, <2 x h
 ; CHECK-SM70-NEXT:    .reg .b32 %r<10>;
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
-; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_f16x2_maxnum_no_nans_param_0];
 ; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_f16x2_maxnum_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_0];
-; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r3, %r2, %r1;
+; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_f16x2_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    fma.rn.f16x2 %r4, %r1, %r2, %r3;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
 ; CHECK-SM70-NEXT:    cvt.f32.f16 %r5, %rs2;
 ; CHECK-SM70-NEXT:    max.f32 %r6, %r5, 0f00000000;
@@ -1391,10 +1391,10 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -1403,10 +1403,10 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -1418,18 +1418,18 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
 ; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_param_0];
-; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_param_2];
-; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r3;
-; CHECK-SM70-NEXT:    cvt.u32.u16 %r4, %rs1;
-; CHECK-SM70-NEXT:    shl.b32 %r5, %r4, 16;
-; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
+; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_param_2];
+; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-SM70-NEXT:    cvt.u32.u16 %r3, %rs1;
+; CHECK-SM70-NEXT:    shl.b32 %r4, %r3, 16;
+; CHECK-SM70-NEXT:    ld.param.b32 %r5, [fma_bf16x2_no_nans_param_1];
+; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r5;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r6, %rs3;
 ; CHECK-SM70-NEXT:    shl.b32 %r7, %r6, 16;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs5, %rs6}, %r1;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r8, %rs5;
 ; CHECK-SM70-NEXT:    shl.b32 %r9, %r8, 16;
-; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r5;
+; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r4;
 ; CHECK-SM70-NEXT:    bfe.u32 %r11, %r10, 16, 1;
 ; CHECK-SM70-NEXT:    add.s32 %r12, %r11, %r10;
 ; CHECK-SM70-NEXT:    add.s32 %r13, %r12, 32767;
@@ -1473,10 +1473,10 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-NEXT:    .reg .b32 %r<9>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    mov.b32 %r5, 1065369472;
 ; CHECK-NEXT:    mov.b32 %r6, 1088438496;
 ; CHECK-NEXT:    fma.rn.bf16x2 %r7, %r4, %r5, %r6;
@@ -1490,10 +1490,10 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<18>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
 ; CHECK-FTZ-NEXT:    cvt.u32.u16 %r5, %rs2;
 ; CHECK-FTZ-NEXT:    shl.b32 %r6, %r5, 16;
@@ -1521,18 +1521,18 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
 ; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
-; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
-; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r3;
-; CHECK-SM70-NEXT:    cvt.u32.u16 %r4, %rs2;
-; CHECK-SM70-NEXT:    shl.b32 %r5, %r4, 16;
-; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
+; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_2];
+; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-SM70-NEXT:    cvt.u32.u16 %r3, %rs2;
+; CHECK-SM70-NEXT:    shl.b32 %r4, %r3, 16;
+; CHECK-SM70-NEXT:    ld.param.b32 %r5, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_1];
+; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r5;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r6, %rs4;
 ; CHECK-SM70-NEXT:    shl.b32 %r7, %r6, 16;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs5, %rs6}, %r1;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r8, %rs6;
 ; CHECK-SM70-NEXT:    shl.b32 %r9, %r8, 16;
-; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r5;
+; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r4;
 ; CHECK-SM70-NEXT:    bfe.u32 %r11, %r10, 16, 1;
 ; CHECK-SM70-NEXT:    add.s32 %r12, %r11, %r10;
 ; CHECK-SM70-NEXT:    add.s32 %r13, %r12, 32767;
@@ -1601,10 +1601,10 @@ define <2 x bfloat> @fma_bf16x2_maxnum_no_nans(<2 x bfloat> %a, <2 x bfloat> %b,
 ; CHECK-NEXT:    .reg .b32 %r<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [fma_bf16x2_maxnum_no_nans_param_1];
-; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_0];
-; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-NEXT:    ret;
 ;
@@ -1613,10 +1613,10 @@ define <2 x bfloat> @fma_bf16x2_maxnum_no_nans(<2 x bfloat> %a, <2 x bfloat> %b,
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<5>;
 ; CHECK-FTZ-EMPTY:
 ; CHECK-FTZ-NEXT:  // %bb.0:
-; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_0];
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r2, [fma_bf16x2_maxnum_no_nans_param_1];
-; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_0];
-; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r3, %r2, %r1;
+; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-FTZ-NEXT:    fma.rn.relu.bf16x2 %r4, %r1, %r2, %r3;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r4;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -1628,18 +1628,18 @@ define <2 x bfloat> @fma_bf16x2_maxnum_no_nans(<2 x bfloat> %a, <2 x bfloat> %b,
 ; CHECK-SM70-EMPTY:
 ; CHECK-SM70-NEXT:  // %bb.0:
 ; CHECK-SM70-NEXT:    ld.param.b32 %r1, [fma_bf16x2_maxnum_no_nans_param_0];
-; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_maxnum_no_nans_param_1];
-; CHECK-SM70-NEXT:    ld.param.b32 %r3, [fma_bf16x2_maxnum_no_nans_param_2];
-; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r3;
-; CHECK-SM70-NEXT:    cvt.u32.u16 %r4, %rs1;
-; CHECK-SM70-NEXT:    shl.b32 %r5, %r4, 16;
-; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r2;
+; CHECK-SM70-NEXT:    ld.param.b32 %r2, [fma_bf16x2_maxnum_no_nans_param_2];
+; CHECK-SM70-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
+; CHECK-SM70-NEXT:    cvt.u32.u16 %r3, %rs1;
+; CHECK-SM70-NEXT:    shl.b32 %r4, %r3, 16;
+; CHECK-SM70-NEXT:    ld.param.b32 %r5, [fma_bf16x2_maxnum_no_nans_param_1];
+; CHECK-SM70-NEXT:    mov.b32 {%rs3, %rs4}, %r5;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r6, %rs3;
 ; CHECK-SM70-NEXT:    shl.b32 %r7, %r6, 16;
 ; CHECK-SM70-NEXT:    mov.b32 {%rs5, %rs6}, %r1;
 ; CHECK-SM70-NEXT:    cvt.u32.u16 %r8, %rs5;
 ; CHECK-SM70-NEXT:    shl.b32 %r9, %r8, 16;
-; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r5;
+; CHECK-SM70-NEXT:    fma.rn.f32 %r10, %r9, %r7, %r4;
 ; CHECK-SM70-NEXT:    bfe.u32 %r11, %r10, 16, 1;
 ; CHECK-SM70-NEXT:    add.s32 %r12, %r11, %r10;
 ; CHECK-SM70-NEXT:    add.s32 %r13, %r12, 32767;
diff --git a/llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll b/llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll
new file mode 100644
index 0000000000000..6b808003259c4
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/fp-contract-f32x2.ll
@@ -0,0 +1,139 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_100 -fp-contract=fast | FileCheck %s --check-prefixes=CHECK,FAST
+; RUN: llc < %s -mtriple=nvptx64 -mcpu=sm_100 | FileCheck %s --check-prefixes=CHECK,DEFAULT
+; RUN: %if ptxas-12.8 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_100 -fp-contract=fast | %ptxas-verify -arch sm_100 %}
+; RUN: %if ptxas-12.8 %{ llc < %s -mtriple=nvptx64 -mcpu=sm_100 | %ptxas-verify -arch sm_100 %}
+
+target triple = "nvptx64-unknown-cuda"
+
+;; FAST-LABEL: @t0
+;; DEFAULT-LABEL: @t0
+define <2 x float> @t0(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; FAST-LABEL: t0(
+; FAST:       {
+; FAST-NEXT:    .reg .b32 %r<9>;
+; FAST-NEXT:    .reg .b64 %rd<5>;
+; FAST-EMPTY:
+; FAST-NEXT:  // %bb.0:
+; FAST-NEXT:    ld.param.v2.b32 {%r1, %r2}, [t0_param_0];
+; FAST-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; FAST-NEXT:    ld.param.v2.b32 {%r3, %r4}, [t0_param_1];
+; FAST-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; FAST-NEXT:    ld.param.v2.b32 {%r5, %r6}, [t0_param_2];
+; FAST-NEXT:    mov.b64 %rd3, {%r5, %r6};
+; FAST-NEXT:    fma.rn.f32x2 %rd4, %rd1, %rd2, %rd3;
+; FAST-NEXT:    mov.b64 {%r7, %r8}, %rd4;
+; FAST-NEXT:    st.param.v2.b32 [func_retval0], {%r7, %r8};
+; FAST-NEXT:    ret;
+;
+; DEFAULT-LABEL: t0(
+; DEFAULT:       {
+; DEFAULT-NEXT:    .reg .b32 %r<9>;
+; DEFAULT-NEXT:    .reg .b64 %rd<6>;
+; DEFAULT-EMPTY:
+; DEFAULT-NEXT:  // %bb.0:
+; DEFAULT-NEXT:    ld.param.v2.b32 {%r1, %r2}, [t0_param_0];
+; DEFAULT-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; DEFAULT-NEXT:    ld.param.v2.b32 {%r3, %r4}, [t0_param_1];
+; DEFAULT-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; DEFAULT-NEXT:    mul.rn.f32x2 %rd3, %rd1, %rd2;
+; DEFAULT-NEXT:    ld.param.v2.b32 {%r5, %r6}, [t0_param_2];
+; DEFAULT-NEXT:    mov.b64 %rd4, {%r5, %r6};
+; DEFAULT-NEXT:    add.rn.f32x2 %rd5, %rd3, %rd4;
+; DEFAULT-NEXT:    mov.b64 {%r7, %r8}, %rd5;
+; DEFAULT-NEXT:    st.param.v2.b32 [func_retval0], {%r7, %r8};
+; DEFAULT-NEXT:    ret;
+  %v0 = fmul <2 x float> %a, %b
+  %v1 = fadd <2 x float> %v0, %c
+  ret <2 x float> %v1
+}
+
+;; We cannot form an fma here, but make sure we explicitly emit add.rn.f32x2
+;; to prevent ptxas from fusing this with anything else.
+define <2 x float> @t1(<2 x float> %a, <2 x float> %b) {
+; FAST-LABEL: t1(
+; FAST:       {
+; FAST-NEXT:    .reg .b32 %r<7>;
+; FAST-NEXT:    .reg .b64 %rd<6>;
+; FAST-EMPTY:
+; FAST-NEXT:  // %bb.0:
+; FAST-NEXT:    ld.param.v2.b32 {%r1, %r2}, [t1_param_0];
+; FAST-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; FAST-NEXT:    ld.param.v2.b32 {%r3, %r4}, [t1_param_1];
+; FAST-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; FAST-NEXT:    add.f32x2 %rd3, %rd1, %rd2;
+; FAST-NEXT:    sub.f32x2 %rd4, %rd1, %rd2;
+; FAST-NEXT:    mul.f32x2 %rd5, %rd3, %rd4;
+; FAST-NEXT:    mov.b64 {%r5, %r6}, %rd5;
+; FAST-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; FAST-NEXT:    ret;
+;
+; DEFAULT-LABEL: t1(
+; DEFAULT:       {
+; DEFAULT-NEXT:    .reg .b32 %r<7>;
+; DEFAULT-NEXT:    .reg .b64 %rd<6>;
+; DEFAULT-EMPTY:
+; DEFAULT-NEXT:  // %bb.0:
+; DEFAULT-NEXT:    ld.param.v2.b32 {%r1, %r2}, [t1_param_0];
+; DEFAULT-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; DEFAULT-NEXT:    ld.param.v2.b32 {%r3, %r4}, [t1_param_1];
+; DEFAULT-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; DEFAULT-NEXT:    add.rn.f32x2 %rd3, %rd1, %rd2;
+; DEFAULT-NEXT:    sub.rn.f32x2 %rd4, %rd1, %rd2;
+; DEFAULT-NEXT:    mul.rn.f32x2 %rd5, %rd3, %rd4;
+; DEFAULT-NEXT:    mov.b64 {%r5, %r6}, %rd5;
+; DEFAULT-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; DEFAULT-NEXT:    ret;
+  %v1 = fadd <2 x float> %a, %b
+  %v2 = fsub <2 x float> %a, %b
+  %v3 = fmul <2 x float> %v1, %v2
+  ret <2 x float> %v3
+}
+
+;; Make sure we generate the non ".rn" version when the "contract" flag is
+;; present on the instructions
+define <2 x float> @t2(<2 x float> %a, <2 x float> %b) {
+; CHECK-LABEL: t2(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<7>;
+; CHECK-NEXT:    .reg .b64 %rd<6>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [t2_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [t2_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    add.f32x2 %rd3, %rd1, %rd2;
+; CHECK-NEXT:    sub.f32x2 %rd4, %rd1, %rd2;
+; CHECK-NEXT:    mul.f32x2 %rd5, %rd3, %rd4;
+; CHECK-NEXT:    mov.b64 {%r5, %r6}, %rd5;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r5, %r6};
+; CHECK-NEXT:    ret;
+  %v1 = fadd contract <2 x float> %a, %b
+  %v2 = fsub contract <2 x float> %a, %b
+  %v3 = fmul contract <2 x float> %v1, %v2
+  ret <2 x float> %v3
+}
+
+;; Make sure we always fold to fma when the "contract" flag is present
+define <2 x float> @t3(<2 x float> %a, <2 x float> %b, <2 x float> %c) {
+; CHECK-LABEL: t3(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<9>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [t3_param_0];
+; CHECK-NEXT:    mov.b64 %rd1, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [t3_param_1];
+; CHECK-NEXT:    mov.b64 %rd2, {%r3, %r4};
+; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [t3_param_2];
+; CHECK-NEXT:    mov.b64 %rd3, {%r5, %r6};
+; CHECK-NEXT:    fma.rn.f32x2 %rd4, %rd1, %rd2, %rd3;
+; CHECK-NEXT:    mov.b64 {%r7, %r8}, %rd4;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r7, %r8};
+; CHECK-NEXT:    ret;
+  %v0 = fmul contract <2 x float> %a, %b
+  %v1 = fadd contract <2 x float> %v0, %c
+  ret <2 x float> %v1
+}
diff --git a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
index 4e1d13696edfe..97e5702f8a75f 100644
--- a/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i8x4-instructions.ll
@@ -268,19 +268,19 @@ define <4 x i8> @test_smax(<4 x i8> %a, <4 x i8> %b) #0 {
 ; CHECK-NEXT:    bfe.s32 %r9, %r2, 24, 8;
 ; CHECK-NEXT:    bfe.s32 %r10, %r1, 24, 8;
 ; CHECK-NEXT:    setp.gt.s32 %p4, %r10, %r9;
-; CHECK-NEXT:    bfe.u32 %r11, %r1, 0, 8;
-; CHECK-NEXT:    bfe.u32 %r12, %r1, 8, 8;
-; CHECK-NEXT:    bfe.u32 %r13, %r1, 16, 8;
-; CHECK-NEXT:    bfe.u32 %r14, %r1, 24, 8;
-; CHECK-NEXT:    bfe.u32 %r15, %r2, 24, 8;
-; CHECK-NEXT:    selp.b32 %r16, %r14, %r15, %p4;
-; CHECK-NEXT:    bfe.u32 %r17, %r2, 16, 8;
-; CHECK-NEXT:    selp.b32 %r18, %r13, %r17, %p3;
+; CHECK-NEXT:    bfe.u32 %r11, %r2, 0, 8;
+; CHECK-NEXT:    bfe.u32 %r12, %r2, 8, 8;
+; CHECK-NEXT:    bfe.u32 %r13, %r2, 16, 8;
+; CHECK-NEXT:    bfe.u32 %r14, %r2, 24, 8;
+; CHECK-NEXT:    bfe.u32 %r15, %r1, 24, 8;
+; CHECK-NEXT:    selp.b32 %r16, %r15, %r14, %p4;
+; CHECK-NEXT:    bfe.u32 %r17, %r1, 16, 8;
+; CHECK-NEXT:    selp.b32 %r18, %r17, %r13, %p3;
 ; CHECK-NEXT:    prmt.b32 %r19, %r18, %r16, 0x3340U;
-; CHECK-NEXT:    bfe.u32 %r20, %r2, 8, 8;
-; CHECK-NEXT:    selp.b32 %r21, %r12, %r20, %p2;
-; CHECK-NEXT:    bfe.u32 %r22, %r2, 0, 8;
-; CHECK-NEXT:    selp.b32 %r23, %r11, %r22, %p1;
+; CHECK-NEXT:    bfe.u32 %r20, %r1, 8, 8;
+; CHECK-NEXT:    selp.b32 %r21, %r20, %r12, %p2;
+; CHECK-NEXT:    bfe.u32 %r22, %r1, 0, 8;
+; CHECK-NEXT:    selp.b32 %r23, %r22, %r11, %p1;
 ; CHECK-NEXT:    prmt.b32 %r24, %r23, %r21, 0x3340U;
 ; CHECK-NEXT:    prmt.b32 %r25, %r24, %r19, 0x5410U;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r25;
@@ -346,19 +346,19 @@ define <4 x i8> @test_smin(<4 x i8> %a, <4 x i8> %b) #0 {
 ; CHECK-NEXT:    bfe.s32 %r9, %r2, 24, 8;
 ; CHECK-NEXT:    bfe.s32 %r10, %r1, 24, 8;
 ; CHECK-NEXT:    setp.le.s32 %p4, %r10, %r9;
-; CHECK-NEXT:    bfe.u32 %r11, %r1, 0, 8;
-; CHECK-NEXT:    bfe.u32 %r12, %r1, 8, 8;
-; CHECK-NEXT:    bfe.u32 %r13, %r1, 16, 8;
-; CHECK-NEXT:    bfe.u32 %r14, %r1, 24, 8;
-; CHECK-NEXT:    bfe.u32 %r15, %r2, 24, 8;
-; CHECK-NEXT:    selp.b32 %r16, %r14, %r15, %p4;
-; CHECK-NEXT:    bfe.u32 %r17, %r2, 16, 8;
-; CHECK-NEXT:    selp.b32 %r18, %r13, %r17, %p3;
+; CHECK-NEXT:    bfe.u32 %r11, %r2, 0, 8;
+; CHECK-NEXT:    bfe.u32 %r12, %r2, 8, 8;
+; CHECK-NEXT:    bfe.u32 %r13, %r2, 16, 8;
+; CHECK-NEXT:    bfe.u32 %r14, %r2, 24, 8;
+; CHECK-NEXT:    bfe.u32 %r15, %r1, 24, 8;
+; CHECK-NEXT:    selp.b32 %r16, %r15, %r14, %p4;
+; CHECK-NEXT:    bfe.u32 %r17, %r1, 16, 8;
+; CHECK-NEXT:    selp.b32 %r18, %r17, %r13, %p3;
 ; CHECK-NEXT:    prmt.b32 %r19, %r18, %r16, 0x3340U;
-; CHECK-NEXT:    bfe.u32 %r20, %r2, 8, 8;
-; CHECK-NEXT:    selp.b32 %r21, %r12, %r20, %p2;
-; CHECK-NEXT:    bfe.u32 %r22, %r2, 0, 8;
-; CHECK-NEXT:    selp.b32 %r23, %r11, %r22, %p1;
+; CHECK-NEXT:    bfe.u32 %r20, %r1, 8, 8;
+; CHECK-NEXT:    selp.b32 %r21, %r20, %r12, %p2;
+; CHECK-NEXT:    bfe.u32 %r22, %r1, 0, 8;
+; CHECK-NEXT:    selp.b32 %r23, %r22, %r11, %p1;
 ; CHECK-NEXT:    prmt.b32 %r24, %r23, %r21, 0x3340U;
 ; CHECK-NEXT:    prmt.b32 %r25, %r24, %r19, 0x5410U;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r25;
@@ -1256,18 +1256,18 @@ define <4 x i8> @test_fptosi_4xhalf_to_4xi8(<4 x half> %a) #0 {
 ; CHECK-NEXT:    .reg .b32 %r<12>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fptosi_4xhalf_to_4xi8_param_0];
-; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; CHECK-NEXT:    cvt.rzi.s16.f16 %rs3, %rs2;
-; CHECK-NEXT:    cvt.rzi.s16.f16 %rs4, %rs1;
-; CHECK-NEXT:    mov.b32 %r3, {%rs4, %rs3};
-; CHECK-NEXT:    mov.b32 {%rs5, %rs6}, %r3;
-; CHECK-NEXT:    cvt.u32.u16 %r4, %rs6;
-; CHECK-NEXT:    cvt.u32.u16 %r5, %rs5;
+; CHECK-NEXT:    ld.param.v4.b16 {%rs1, %rs2, %rs3, %rs4}, [test_fptosi_4xhalf_to_4xi8_param_0];
+; CHECK-NEXT:    mov.b32 %r2, {%rs3, %rs4};
+; CHECK-NEXT:    mov.b32 %r1, {%rs1, %rs2};
+; CHECK-NEXT:    cvt.rzi.s16.f16 %rs5, %rs4;
+; CHECK-NEXT:    cvt.rzi.s16.f16 %rs6, %rs3;
+; CHECK-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r3;
+; CHECK-NEXT:    cvt.u32.u16 %r4, %rs8;
+; CHECK-NEXT:    cvt.u32.u16 %r5, %rs7;
 ; CHECK-NEXT:    prmt.b32 %r6, %r5, %r4, 0x3340U;
-; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
-; CHECK-NEXT:    cvt.rzi.s16.f16 %rs9, %rs8;
-; CHECK-NEXT:    cvt.rzi.s16.f16 %rs10, %rs7;
+; CHECK-NEXT:    cvt.rzi.s16.f16 %rs9, %rs2;
+; CHECK-NEXT:    cvt.rzi.s16.f16 %rs10, %rs1;
 ; CHECK-NEXT:    mov.b32 %r7, {%rs10, %rs9};
 ; CHECK-NEXT:    mov.b32 {%rs11, %rs12}, %r7;
 ; CHECK-NEXT:    cvt.u32.u16 %r8, %rs12;
@@ -1287,18 +1287,18 @@ define <4 x i8> @test_fptoui_4xhalf_to_4xi8(<4 x half> %a) #0 {
 ; CHECK-NEXT:    .reg .b32 %r<12>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_fptoui_4xhalf_to_4xi8_param_0];
-; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; CHECK-NEXT:    cvt.rzi.u16.f16 %rs3, %rs2;
-; CHECK-NEXT:    cvt.rzi.u16.f16 %rs4, %rs1;
-; CHECK-NEXT:    mov.b32 %r3, {%rs4, %rs3};
-; CHECK-NEXT:    mov.b32 {%rs5, %rs6}, %r3;
-; CHECK-NEXT:    cvt.u32.u16 %r4, %rs6;
-; CHECK-NEXT:    cvt.u32.u16 %r5, %rs5;
+; CHECK-NEXT:    ld.param.v4.b16 {%rs1, %rs2, %rs3, %rs4}, [test_fptoui_4xhalf_to_4xi8_param_0];
+; CHECK-NEXT:    mov.b32 %r2, {%rs3, %rs4};
+; CHECK-NEXT:    mov.b32 %r1, {%rs1, %rs2};
+; CHECK-NEXT:    cvt.rzi.u16.f16 %rs5, %rs4;
+; CHECK-NEXT:    cvt.rzi.u16.f16 %rs6, %rs3;
+; CHECK-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r3;
+; CHECK-NEXT:    cvt.u32.u16 %r4, %rs8;
+; CHECK-NEXT:    cvt.u32.u16 %r5, %rs7;
 ; CHECK-NEXT:    prmt.b32 %r6, %r5, %r4, 0x3340U;
-; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
-; CHECK-NEXT:    cvt.rzi.u16.f16 %rs9, %rs8;
-; CHECK-NEXT:    cvt.rzi.u16.f16 %rs10, %rs7;
+; CHECK-NEXT:    cvt.rzi.u16.f16 %rs9, %rs2;
+; CHECK-NEXT:    cvt.rzi.u16.f16 %rs10, %rs1;
 ; CHECK-NEXT:    mov.b32 %r7, {%rs10, %rs9};
 ; CHECK-NEXT:    mov.b32 {%rs11, %rs12}, %r7;
 ; CHECK-NEXT:    cvt.u32.u16 %r8, %rs12;
diff --git a/llvm/test/CodeGen/NVPTX/ldparam-v4.ll b/llvm/test/CodeGen/NVPTX/ldparam-v4.ll
index 419c780f7d82a..282da30af8c94 100644
--- a/llvm/test/CodeGen/NVPTX/ldparam-v4.ll
+++ b/llvm/test/CodeGen/NVPTX/ldparam-v4.ll
@@ -7,8 +7,8 @@ declare <4 x float> @bar()
 define void @foo(ptr %ptr) {
 ; CHECK-LABEL: foo(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<9>;
-; CHECK-NEXT:    .reg .b64 %rd<2>;
+; CHECK-NEXT:    .reg .b32 %r<5>;
+; CHECK-NEXT:    .reg .b64 %rd<6>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [foo_param_0];
@@ -18,9 +18,11 @@ define void @foo(ptr %ptr) {
 ; CHECK-NEXT:    bar,
 ; CHECK-NEXT:    (
 ; CHECK-NEXT:    );
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [retval0];
+; CHECK-NEXT:    ld.param.v2.b64 {%rd2, %rd3}, [retval0];
 ; CHECK-NEXT:    } // callseq 0
-; CHECK-NEXT:    st.v4.b32 [%rd1], {%r1, %r2, %r3, %r4};
+; CHECK-NEXT:    mov.b64 {%r1, %r2}, %rd3;
+; CHECK-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-NEXT:    st.v4.b32 [%rd1], {%r3, %r4, %r1, %r2};
 ; CHECK-NEXT:    ret;
   %val = tail call <4 x float> @bar()
   store <4 x float> %val, ptr %ptr
diff --git a/llvm/test/CodeGen/NVPTX/math-intrins.ll b/llvm/test/CodeGen/NVPTX/math-intrins.ll
index 71af7a7d475d3..cd4deef2dd4d2 100644
--- a/llvm/test/CodeGen/NVPTX/math-intrins.ll
+++ b/llvm/test/CodeGen/NVPTX/math-intrins.ll
@@ -576,9 +576,9 @@ define <2 x half> @minnum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-F16-NEXT:    .reg .b32 %r<4>;
 ; CHECK-F16-EMPTY:
 ; CHECK-F16-NEXT:  // %bb.0:
-; CHECK-F16-NEXT:    ld.param.b32 %r1, [minnum_v2half_param_1];
-; CHECK-F16-NEXT:    ld.param.b32 %r2, [minnum_v2half_param_0];
-; CHECK-F16-NEXT:    min.f16x2 %r3, %r2, %r1;
+; CHECK-F16-NEXT:    ld.param.b32 %r1, [minnum_v2half_param_0];
+; CHECK-F16-NEXT:    ld.param.b32 %r2, [minnum_v2half_param_1];
+; CHECK-F16-NEXT:    min.f16x2 %r3, %r1, %r2;
 ; CHECK-F16-NEXT:    st.param.b32 [func_retval0], %r3;
 ; CHECK-F16-NEXT:    ret;
 ;
@@ -919,9 +919,9 @@ define <2 x half> @minimum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-F16-NEXT:    .reg .b32 %r<4>;
 ; CHECK-F16-EMPTY:
 ; CHECK-F16-NEXT:  // %bb.0:
-; CHECK-F16-NEXT:    ld.param.b32 %r1, [minimum_v2half_param_1];
-; CHECK-F16-NEXT:    ld.param.b32 %r2, [minimum_v2half_param_0];
-; CHECK-F16-NEXT:    min.NaN.f16x2 %r3, %r2, %r1;
+; CHECK-F16-NEXT:    ld.param.b32 %r1, [minimum_v2half_param_0];
+; CHECK-F16-NEXT:    ld.param.b32 %r2, [minimum_v2half_param_1];
+; CHECK-F16-NEXT:    min.NaN.f16x2 %r3, %r1, %r2;
 ; CHECK-F16-NEXT:    st.param.b32 [func_retval0], %r3;
 ; CHECK-F16-NEXT:    ret;
 ;
@@ -1117,9 +1117,9 @@ define <2 x half> @maxnum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-F16-NEXT:    .reg .b32 %r<4>;
 ; CHECK-F16-EMPTY:
 ; CHECK-F16-NEXT:  // %bb.0:
-; CHECK-F16-NEXT:    ld.param.b32 %r1, [maxnum_v2half_param_1];
-; CHECK-F16-NEXT:    ld.param.b32 %r2, [maxnum_v2half_param_0];
-; CHECK-F16-NEXT:    max.f16x2 %r3, %r2, %r1;
+; CHECK-F16-NEXT:    ld.param.b32 %r1, [maxnum_v2half_param_0];
+; CHECK-F16-NEXT:    ld.param.b32 %r2, [maxnum_v2half_param_1];
+; CHECK-F16-NEXT:    max.f16x2 %r3, %r1, %r2;
 ; CHECK-F16-NEXT:    st.param.b32 [func_retval0], %r3;
 ; CHECK-F16-NEXT:    ret;
 ;
@@ -1456,9 +1456,9 @@ define <2 x half> @maximum_v2half(<2 x half> %a, <2 x half> %b) {
 ; CHECK-F16-NEXT:    .reg .b32 %r<4>;
 ; CHECK-F16-EMPTY:
 ; CHECK-F16-NEXT:  // %bb.0:
-; CHECK-F16-NEXT:    ld.param.b32 %r1, [maximum_v2half_param_1];
-; CHECK-F16-NEXT:    ld.param.b32 %r2, [maximum_v2half_param_0];
-; CHECK-F16-NEXT:    max.NaN.f16x2 %r3, %r2, %r1;
+; CHECK-F16-NEXT:    ld.param.b32 %r1, [maximum_v2half_param_0];
+; CHECK-F16-NEXT:    ld.param.b32 %r2, [maximum_v2half_param_1];
+; CHECK-F16-NEXT:    max.NaN.f16x2 %r3, %r1, %r2;
 ; CHECK-F16-NEXT:    st.param.b32 [func_retval0], %r3;
 ; CHECK-F16-NEXT:    ret;
 ;
diff --git a/llvm/test/CodeGen/NVPTX/param-load-store.ll b/llvm/test/CodeGen/NVPTX/param-load-store.ll
index ce6707c4564bf..1b419b63e6227 100644
--- a/llvm/test/CodeGen/NVPTX/param-load-store.ll
+++ b/llvm/test/CodeGen/NVPTX/param-load-store.ll
@@ -345,9 +345,11 @@ define <3 x i16> @test_v3i16(<3 x i16> %a) {
 ; CHECK: .func  (.param .align 8 .b8 func_retval0[8])
 ; CHECK-LABEL: test_v4i16(
 ; CHECK-NEXT: .param .align 8 .b8 test_v4i16_param_0[8]
-; CHECK:      ld.param.v2.b32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_v4i16_param_0]
+; CHECK-DAG:  ld.param.v4.b16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i16_param_0]
+; CHECK-DAG:  mov.b32 [[E01:%r[0-9]+]], {[[E0]], [[E1]]}
+; CHECK-DAG:  mov.b32 [[E23:%r[0-9]+]], {[[E2]], [[E3]]}
 ; CHECK:      .param .align 8 .b8 param0[8];
-; CHECK:      st.param.v2.b32 [param0], {[[E0]], [[E1]]};
+; CHECK:      st.param.v2.b32 [param0], {[[E01]], [[E23]]};
 ; CHECK:      .param .align 8 .b8 retval0[8];
 ; CHECK:      call.uni (retval0),
 ; CHECK-NEXT: test_v4i16,
@@ -474,7 +476,9 @@ define <3 x half> @test_v3f16(<3 x half> %a) {
 ; CHECK:.func  (.param .align 8 .b8 func_retval0[8])
 ; CHECK-LABEL: test_v4f16(
 ; CHECK:      .param .align 8 .b8 test_v4f16_param_0[8]
-; CHECK:      ld.param.v2.b32 {[[R01:%r[0-9]+]], [[R23:%r[0-9]+]]}, [test_v4f16_param_0];
+; CHECK-DAG:  ld.param.v4.b16 {[[R0:%rs[0-9]+]], [[R1:%rs[0-9]+]], [[R2:%rs[0-9]+]], [[R3:%rs[0-9]+]]}, [test_v4f16_param_0];
+; CHECK-DAG:  mov.b32 [[R01:%r[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-DAG:  mov.b32 [[R23:%r[0-9]+]], {[[R2]], [[R3]]}
 ; CHECK:      .param .align 8 .b8 param0[8];
 ; CHECK:      st.param.v2.b32 [param0], {[[R01]], [[R23]]};
 ; CHECK:      .param .align 8 .b8 retval0[8];
diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
index d5b451dad7bc3..114d800e0b59a 100644
--- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
+++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
@@ -23,19 +23,19 @@ define half @reduce_fadd_half(<8 x half> %in) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_param_0];
-; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; CHECK-NEXT:    mov.b16 %rs3, 0x0000;
-; CHECK-NEXT:    add.rn.f16 %rs4, %rs1, %rs3;
-; CHECK-NEXT:    add.rn.f16 %rs5, %rs4, %rs2;
-; CHECK-NEXT:    mov.b32 {%rs6, %rs7}, %r2;
-; CHECK-NEXT:    add.rn.f16 %rs8, %rs5, %rs6;
-; CHECK-NEXT:    add.rn.f16 %rs9, %rs8, %rs7;
-; CHECK-NEXT:    mov.b32 {%rs10, %rs11}, %r3;
-; CHECK-NEXT:    add.rn.f16 %rs12, %rs9, %rs10;
-; CHECK-NEXT:    add.rn.f16 %rs13, %rs12, %rs11;
-; CHECK-NEXT:    mov.b32 {%rs14, %rs15}, %r4;
-; CHECK-NEXT:    add.rn.f16 %rs16, %rs13, %rs14;
-; CHECK-NEXT:    add.rn.f16 %rs17, %rs16, %rs15;
+; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
+; CHECK-NEXT:    mov.b32 {%rs3, %rs4}, %r3;
+; CHECK-NEXT:    mov.b32 {%rs5, %rs6}, %r2;
+; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
+; CHECK-NEXT:    mov.b16 %rs9, 0x0000;
+; CHECK-NEXT:    add.rn.f16 %rs10, %rs7, %rs9;
+; CHECK-NEXT:    add.rn.f16 %rs11, %rs10, %rs8;
+; CHECK-NEXT:    add.rn.f16 %rs12, %rs11, %rs5;
+; CHECK-NEXT:    add.rn.f16 %rs13, %rs12, %rs6;
+; CHECK-NEXT:    add.rn.f16 %rs14, %rs13, %rs3;
+; CHECK-NEXT:    add.rn.f16 %rs15, %rs14, %rs4;
+; CHECK-NEXT:    add.rn.f16 %rs16, %rs15, %rs1;
+; CHECK-NEXT:    add.rn.f16 %rs17, %rs16, %rs2;
 ; CHECK-NEXT:    st.param.b16 [func_retval0], %rs17;
 ; CHECK-NEXT:    ret;
   %res = call half @llvm.vector.reduce.fadd(half 0.0, <8 x half> %in)
@@ -43,45 +43,22 @@ define half @reduce_fadd_half(<8 x half> %in) {
 }
 
 define half @reduce_fadd_half_reassoc(<8 x half> %in) {
-; CHECK-SM80-LABEL: reduce_fadd_half_reassoc(
-; CHECK-SM80:       {
-; CHECK-SM80-NEXT:    .reg .b16 %rs<6>;
-; CHECK-SM80-NEXT:    .reg .b32 %r<10>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT:  // %bb.0:
-; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0];
-; CHECK-SM80-NEXT:    add.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM80-NEXT:    add.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM80-NEXT:    add.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM80-NEXT:    { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT:    // implicit-def: %rs2
-; CHECK-SM80-NEXT:    mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT:    add.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM80-NEXT:    { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT:    mov.b16 %rs4, 0x0000;
-; CHECK-SM80-NEXT:    add.rn.f16 %rs5, %rs3, %rs4;
-; CHECK-SM80-NEXT:    st.param.b16 [func_retval0], %rs5;
-; CHECK-SM80-NEXT:    ret;
-;
-; CHECK-SM100-LABEL: reduce_fadd_half_reassoc(
-; CHECK-SM100:       {
-; CHECK-SM100-NEXT:    .reg .b16 %rs<6>;
-; CHECK-SM100-NEXT:    .reg .b32 %r<10>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT:  // %bb.0:
-; CHECK-SM100-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0];
-; CHECK-SM100-NEXT:    add.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM100-NEXT:    add.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM100-NEXT:    add.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT:    mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT:    // implicit-def: %rs2
-; CHECK-SM100-NEXT:    mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT:    add.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT:    mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT:    mov.b16 %rs4, 0x0000;
-; CHECK-SM100-NEXT:    add.rn.f16 %rs5, %rs3, %rs4;
-; CHECK-SM100-NEXT:    st.param.b16 [func_retval0], %rs5;
-; CHECK-SM100-NEXT:    ret;
+; CHECK-LABEL: reduce_fadd_half_reassoc(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<6>;
+; CHECK-NEXT:    .reg .b32 %r<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_half_reassoc_param_0];
+; CHECK-NEXT:    add.rn.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT:    add.rn.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT:    add.rn.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT:    add.rn.f16 %rs3, %rs2, %rs1;
+; CHECK-NEXT:    mov.b16 %rs4, 0x0000;
+; CHECK-NEXT:    add.rn.f16 %rs5, %rs3, %rs4;
+; CHECK-NEXT:    st.param.b16 [func_retval0], %rs5;
+; CHECK-NEXT:    ret;
   %res = call reassoc half @llvm.vector.reduce.fadd(half 0.0, <8 x half> %in)
   ret half %res
 }
@@ -116,18 +93,23 @@ define float @reduce_fadd_float(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fadd_float(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<17>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fadd_float_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_param_0];
-; CHECK-NEXT:    add.rn.f32 %r9, %r1, 0f00000000;
-; CHECK-NEXT:    add.rn.f32 %r10, %r9, %r2;
-; CHECK-NEXT:    add.rn.f32 %r11, %r10, %r3;
-; CHECK-NEXT:    add.rn.f32 %r12, %r11, %r4;
-; CHECK-NEXT:    add.rn.f32 %r13, %r12, %r5;
-; CHECK-NEXT:    add.rn.f32 %r14, %r13, %r6;
-; CHECK-NEXT:    add.rn.f32 %r15, %r14, %r7;
-; CHECK-NEXT:    add.rn.f32 %r16, %r15, %r8;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fadd_float_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    add.rn.f32 %r9, %r5, 0f00000000;
+; CHECK-NEXT:    add.rn.f32 %r10, %r9, %r6;
+; CHECK-NEXT:    add.rn.f32 %r11, %r10, %r7;
+; CHECK-NEXT:    add.rn.f32 %r12, %r11, %r8;
+; CHECK-NEXT:    add.rn.f32 %r13, %r12, %r1;
+; CHECK-NEXT:    add.rn.f32 %r14, %r13, %r2;
+; CHECK-NEXT:    add.rn.f32 %r15, %r14, %r3;
+; CHECK-NEXT:    add.rn.f32 %r16, %r15, %r4;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r16;
 ; CHECK-NEXT:    ret;
   %res = call float @llvm.vector.reduce.fadd(float 0.0, <8 x float> %in)
@@ -135,45 +117,94 @@ define float @reduce_fadd_float(<8 x float> %in) {
 }
 
 define float @reduce_fadd_float_reassoc(<8 x float> %in) {
-; CHECK-LABEL: reduce_fadd_float_reassoc(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<17>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fadd_float_reassoc_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_param_0];
-; CHECK-NEXT:    add.rn.f32 %r9, %r3, %r7;
-; CHECK-NEXT:    add.rn.f32 %r10, %r1, %r5;
-; CHECK-NEXT:    add.rn.f32 %r11, %r4, %r8;
-; CHECK-NEXT:    add.rn.f32 %r12, %r2, %r6;
-; CHECK-NEXT:    add.rn.f32 %r13, %r12, %r11;
-; CHECK-NEXT:    add.rn.f32 %r14, %r10, %r9;
-; CHECK-NEXT:    add.rn.f32 %r15, %r14, %r13;
-; CHECK-NEXT:    add.rn.f32 %r16, %r15, 0f00000000;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r16;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_fadd_float_reassoc(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<17>;
+; CHECK-SM80-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-SM80-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fadd_float_reassoc_param_0];
+; CHECK-SM80-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-SM80-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-SM80-NEXT:    add.rn.f32 %r9, %r8, %r4;
+; CHECK-SM80-NEXT:    add.rn.f32 %r10, %r6, %r2;
+; CHECK-SM80-NEXT:    add.rn.f32 %r11, %r7, %r3;
+; CHECK-SM80-NEXT:    add.rn.f32 %r12, %r5, %r1;
+; CHECK-SM80-NEXT:    add.rn.f32 %r13, %r12, %r11;
+; CHECK-SM80-NEXT:    add.rn.f32 %r14, %r10, %r9;
+; CHECK-SM80-NEXT:    add.rn.f32 %r15, %r14, %r13;
+; CHECK-SM80-NEXT:    add.rn.f32 %r16, %r15, 0f00000000;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r16;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_fadd_float_reassoc(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<13>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-SM100-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fadd_float_reassoc_param_0];
+; CHECK-SM100-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-SM100-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-SM100-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-SM100-NEXT:    add.rn.f32x2 %rd5, %rd2, %rd4;
+; CHECK-SM100-NEXT:    add.rn.f32x2 %rd6, %rd1, %rd3;
+; CHECK-SM100-NEXT:    add.rn.f32x2 %rd7, %rd6, %rd5;
+; CHECK-SM100-NEXT:    mov.b64 {%r9, %r10}, %rd7;
+; CHECK-SM100-NEXT:    add.rn.f32 %r11, %r10, %r9;
+; CHECK-SM100-NEXT:    add.rn.f32 %r12, %r11, 0f00000000;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT:    ret;
   %res = call reassoc float @llvm.vector.reduce.fadd(float 0.0, <8 x float> %in)
   ret float %res
 }
 
 define float @reduce_fadd_float_reassoc_nonpow2(<7 x float> %in) {
-; CHECK-LABEL: reduce_fadd_float_reassoc_nonpow2(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<15>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r7, [reduce_fadd_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [reduce_fadd_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT:    add.rn.f32 %r8, %r3, %r7;
-; CHECK-NEXT:    add.rn.f32 %r9, %r1, %r5;
-; CHECK-NEXT:    add.rn.f32 %r10, %r9, %r8;
-; CHECK-NEXT:    add.rn.f32 %r11, %r2, %r6;
-; CHECK-NEXT:    add.rn.f32 %r12, %r11, %r4;
-; CHECK-NEXT:    add.rn.f32 %r13, %r10, %r12;
-; CHECK-NEXT:    add.rn.f32 %r14, %r13, 0f00000000;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r14;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_fadd_float_reassoc_nonpow2(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<15>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.b32 %r7, [reduce_fadd_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT:    ld.param.v2.b32 {%r5, %r6}, [reduce_fadd_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT:    add.rn.f32 %r8, %r3, %r7;
+; CHECK-SM80-NEXT:    add.rn.f32 %r9, %r1, %r5;
+; CHECK-SM80-NEXT:    add.rn.f32 %r10, %r9, %r8;
+; CHECK-SM80-NEXT:    add.rn.f32 %r11, %r2, %r6;
+; CHECK-SM80-NEXT:    add.rn.f32 %r12, %r11, %r4;
+; CHECK-SM80-NEXT:    add.rn.f32 %r13, %r10, %r12;
+; CHECK-SM80-NEXT:    add.rn.f32 %r14, %r13, 0f00000000;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r14;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_fadd_float_reassoc_nonpow2(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<13>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b32 {%r5, %r6}, [reduce_fadd_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-SM100-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fadd_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT:    mov.b64 %rd2, {%r1, %r2};
+; CHECK-SM100-NEXT:    mov.b64 %rd3, {%r3, %r4};
+; CHECK-SM100-NEXT:    ld.param.b32 %r7, [reduce_fadd_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT:    mov.b32 %r8, 0f80000000;
+; CHECK-SM100-NEXT:    mov.b64 %rd4, {%r7, %r8};
+; CHECK-SM100-NEXT:    add.rn.f32x2 %rd5, %rd3, %rd4;
+; CHECK-SM100-NEXT:    add.rn.f32x2 %rd6, %rd2, %rd1;
+; CHECK-SM100-NEXT:    add.rn.f32x2 %rd7, %rd6, %rd5;
+; CHECK-SM100-NEXT:    mov.b64 {%r9, %r10}, %rd7;
+; CHECK-SM100-NEXT:    add.rn.f32 %r11, %r9, %r10;
+; CHECK-SM100-NEXT:    add.rn.f32 %r12, %r11, 0f00000000;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r12;
+; CHECK-SM100-NEXT:    ret;
   %res = call reassoc float @llvm.vector.reduce.fadd(float 0.0, <7 x float> %in)
   ret float %res
 }
@@ -187,17 +218,17 @@ define half @reduce_fmul_half(<8 x half> %in) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_param_0];
-; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; CHECK-NEXT:    mul.rn.f16 %rs3, %rs1, %rs2;
-; CHECK-NEXT:    mov.b32 {%rs4, %rs5}, %r2;
-; CHECK-NEXT:    mul.rn.f16 %rs6, %rs3, %rs4;
-; CHECK-NEXT:    mul.rn.f16 %rs7, %rs6, %rs5;
-; CHECK-NEXT:    mov.b32 {%rs8, %rs9}, %r3;
-; CHECK-NEXT:    mul.rn.f16 %rs10, %rs7, %rs8;
-; CHECK-NEXT:    mul.rn.f16 %rs11, %rs10, %rs9;
-; CHECK-NEXT:    mov.b32 {%rs12, %rs13}, %r4;
-; CHECK-NEXT:    mul.rn.f16 %rs14, %rs11, %rs12;
-; CHECK-NEXT:    mul.rn.f16 %rs15, %rs14, %rs13;
+; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
+; CHECK-NEXT:    mov.b32 {%rs3, %rs4}, %r3;
+; CHECK-NEXT:    mov.b32 {%rs5, %rs6}, %r2;
+; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
+; CHECK-NEXT:    mul.rn.f16 %rs9, %rs7, %rs8;
+; CHECK-NEXT:    mul.rn.f16 %rs10, %rs9, %rs5;
+; CHECK-NEXT:    mul.rn.f16 %rs11, %rs10, %rs6;
+; CHECK-NEXT:    mul.rn.f16 %rs12, %rs11, %rs3;
+; CHECK-NEXT:    mul.rn.f16 %rs13, %rs12, %rs4;
+; CHECK-NEXT:    mul.rn.f16 %rs14, %rs13, %rs1;
+; CHECK-NEXT:    mul.rn.f16 %rs15, %rs14, %rs2;
 ; CHECK-NEXT:    st.param.b16 [func_retval0], %rs15;
 ; CHECK-NEXT:    ret;
   %res = call half @llvm.vector.reduce.fmul(half 1.0, <8 x half> %in)
@@ -205,41 +236,20 @@ define half @reduce_fmul_half(<8 x half> %in) {
 }
 
 define half @reduce_fmul_half_reassoc(<8 x half> %in) {
-; CHECK-SM80-LABEL: reduce_fmul_half_reassoc(
-; CHECK-SM80:       {
-; CHECK-SM80-NEXT:    .reg .b16 %rs<4>;
-; CHECK-SM80-NEXT:    .reg .b32 %r<10>;
-; CHECK-SM80-EMPTY:
-; CHECK-SM80-NEXT:  // %bb.0:
-; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0];
-; CHECK-SM80-NEXT:    mul.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM80-NEXT:    mul.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM80-NEXT:    mul.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM80-NEXT:    { .reg .b16 tmp; mov.b32 {tmp, %rs1}, %r7; }
-; CHECK-SM80-NEXT:    // implicit-def: %rs2
-; CHECK-SM80-NEXT:    mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM80-NEXT:    mul.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM80-NEXT:    { .reg .b16 tmp; mov.b32 {%rs3, tmp}, %r9; }
-; CHECK-SM80-NEXT:    st.param.b16 [func_retval0], %rs3;
-; CHECK-SM80-NEXT:    ret;
-;
-; CHECK-SM100-LABEL: reduce_fmul_half_reassoc(
-; CHECK-SM100:       {
-; CHECK-SM100-NEXT:    .reg .b16 %rs<4>;
-; CHECK-SM100-NEXT:    .reg .b32 %r<10>;
-; CHECK-SM100-EMPTY:
-; CHECK-SM100-NEXT:  // %bb.0:
-; CHECK-SM100-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0];
-; CHECK-SM100-NEXT:    mul.rn.f16x2 %r5, %r2, %r4;
-; CHECK-SM100-NEXT:    mul.rn.f16x2 %r6, %r1, %r3;
-; CHECK-SM100-NEXT:    mul.rn.f16x2 %r7, %r6, %r5;
-; CHECK-SM100-NEXT:    mov.b32 {_, %rs1}, %r7;
-; CHECK-SM100-NEXT:    // implicit-def: %rs2
-; CHECK-SM100-NEXT:    mov.b32 %r8, {%rs1, %rs2};
-; CHECK-SM100-NEXT:    mul.rn.f16x2 %r9, %r7, %r8;
-; CHECK-SM100-NEXT:    mov.b32 {%rs3, _}, %r9;
-; CHECK-SM100-NEXT:    st.param.b16 [func_retval0], %rs3;
-; CHECK-SM100-NEXT:    ret;
+; CHECK-LABEL: reduce_fmul_half_reassoc(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<4>;
+; CHECK-NEXT:    .reg .b32 %r<8>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_half_reassoc_param_0];
+; CHECK-NEXT:    mul.rn.f16x2 %r5, %r2, %r4;
+; CHECK-NEXT:    mul.rn.f16x2 %r6, %r1, %r3;
+; CHECK-NEXT:    mul.rn.f16x2 %r7, %r6, %r5;
+; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r7;
+; CHECK-NEXT:    mul.rn.f16 %rs3, %rs2, %rs1;
+; CHECK-NEXT:    st.param.b16 [func_retval0], %rs3;
+; CHECK-NEXT:    ret;
   %res = call reassoc half @llvm.vector.reduce.fmul(half 1.0, <8 x half> %in)
   ret half %res
 }
@@ -275,17 +285,22 @@ define float @reduce_fmul_float(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fmul_float(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<16>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmul_float_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_param_0];
-; CHECK-NEXT:    mul.rn.f32 %r9, %r1, %r2;
-; CHECK-NEXT:    mul.rn.f32 %r10, %r9, %r3;
-; CHECK-NEXT:    mul.rn.f32 %r11, %r10, %r4;
-; CHECK-NEXT:    mul.rn.f32 %r12, %r11, %r5;
-; CHECK-NEXT:    mul.rn.f32 %r13, %r12, %r6;
-; CHECK-NEXT:    mul.rn.f32 %r14, %r13, %r7;
-; CHECK-NEXT:    mul.rn.f32 %r15, %r14, %r8;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmul_float_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    mul.rn.f32 %r9, %r5, %r6;
+; CHECK-NEXT:    mul.rn.f32 %r10, %r9, %r7;
+; CHECK-NEXT:    mul.rn.f32 %r11, %r10, %r8;
+; CHECK-NEXT:    mul.rn.f32 %r12, %r11, %r1;
+; CHECK-NEXT:    mul.rn.f32 %r13, %r12, %r2;
+; CHECK-NEXT:    mul.rn.f32 %r14, %r13, %r3;
+; CHECK-NEXT:    mul.rn.f32 %r15, %r14, %r4;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
 ; CHECK-NEXT:    ret;
   %res = call float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in)
@@ -293,43 +308,90 @@ define float @reduce_fmul_float(<8 x float> %in) {
 }
 
 define float @reduce_fmul_float_reassoc(<8 x float> %in) {
-; CHECK-LABEL: reduce_fmul_float_reassoc(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmul_float_reassoc_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_param_0];
-; CHECK-NEXT:    mul.rn.f32 %r9, %r3, %r7;
-; CHECK-NEXT:    mul.rn.f32 %r10, %r1, %r5;
-; CHECK-NEXT:    mul.rn.f32 %r11, %r4, %r8;
-; CHECK-NEXT:    mul.rn.f32 %r12, %r2, %r6;
-; CHECK-NEXT:    mul.rn.f32 %r13, %r12, %r11;
-; CHECK-NEXT:    mul.rn.f32 %r14, %r10, %r9;
-; CHECK-NEXT:    mul.rn.f32 %r15, %r14, %r13;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_fmul_float_reassoc(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_param_0+16];
+; CHECK-SM80-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-SM80-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmul_float_reassoc_param_0];
+; CHECK-SM80-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-SM80-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-SM80-NEXT:    mul.rn.f32 %r9, %r8, %r4;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r10, %r6, %r2;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r11, %r7, %r3;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r12, %r5, %r1;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r13, %r12, %r11;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r14, %r10, %r9;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r15, %r14, %r13;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_fmul_float_reassoc(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<12>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_param_0+16];
+; CHECK-SM100-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-SM100-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmul_float_reassoc_param_0];
+; CHECK-SM100-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-SM100-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-SM100-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-SM100-NEXT:    mul.rn.f32x2 %rd5, %rd2, %rd4;
+; CHECK-SM100-NEXT:    mul.rn.f32x2 %rd6, %rd1, %rd3;
+; CHECK-SM100-NEXT:    mul.rn.f32x2 %rd7, %rd6, %rd5;
+; CHECK-SM100-NEXT:    mov.b64 {%r9, %r10}, %rd7;
+; CHECK-SM100-NEXT:    mul.rn.f32 %r11, %r10, %r9;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r11;
+; CHECK-SM100-NEXT:    ret;
   %res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <8 x float> %in)
   ret float %res
 }
 
 define float @reduce_fmul_float_reassoc_nonpow2(<7 x float> %in) {
-; CHECK-LABEL: reduce_fmul_float_reassoc_nonpow2(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<14>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r7, [reduce_fmul_float_reassoc_nonpow2_param_0+24];
-; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [reduce_fmul_float_reassoc_nonpow2_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_nonpow2_param_0];
-; CHECK-NEXT:    mul.rn.f32 %r8, %r3, %r7;
-; CHECK-NEXT:    mul.rn.f32 %r9, %r1, %r5;
-; CHECK-NEXT:    mul.rn.f32 %r10, %r9, %r8;
-; CHECK-NEXT:    mul.rn.f32 %r11, %r2, %r6;
-; CHECK-NEXT:    mul.rn.f32 %r12, %r11, %r4;
-; CHECK-NEXT:    mul.rn.f32 %r13, %r10, %r12;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r13;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_fmul_float_reassoc_nonpow2(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<14>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.b32 %r7, [reduce_fmul_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM80-NEXT:    ld.param.v2.b32 {%r5, %r6}, [reduce_fmul_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_nonpow2_param_0];
+; CHECK-SM80-NEXT:    mul.rn.f32 %r8, %r3, %r7;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r9, %r1, %r5;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r10, %r9, %r8;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r11, %r2, %r6;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r12, %r11, %r4;
+; CHECK-SM80-NEXT:    mul.rn.f32 %r13, %r10, %r12;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r13;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_fmul_float_reassoc_nonpow2(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<12>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<8>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b32 {%r5, %r6}, [reduce_fmul_float_reassoc_nonpow2_param_0+16];
+; CHECK-SM100-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-SM100-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmul_float_reassoc_nonpow2_param_0];
+; CHECK-SM100-NEXT:    mov.b64 %rd2, {%r1, %r2};
+; CHECK-SM100-NEXT:    mov.b64 %rd3, {%r3, %r4};
+; CHECK-SM100-NEXT:    ld.param.b32 %r7, [reduce_fmul_float_reassoc_nonpow2_param_0+24];
+; CHECK-SM100-NEXT:    mov.b32 %r8, 0f3F800000;
+; CHECK-SM100-NEXT:    mov.b64 %rd4, {%r7, %r8};
+; CHECK-SM100-NEXT:    mul.rn.f32x2 %rd5, %rd3, %rd4;
+; CHECK-SM100-NEXT:    mul.rn.f32x2 %rd6, %rd2, %rd1;
+; CHECK-SM100-NEXT:    mul.rn.f32x2 %rd7, %rd6, %rd5;
+; CHECK-SM100-NEXT:    mov.b64 {%r9, %r10}, %rd7;
+; CHECK-SM100-NEXT:    mul.rn.f32 %r11, %r9, %r10;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r11;
+; CHECK-SM100-NEXT:    ret;
   %res = call reassoc float @llvm.vector.reduce.fmul(float 1.0, <7 x float> %in)
   ret float %res
 }
@@ -405,15 +467,20 @@ define float @reduce_fmax_float(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fmax_float(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<16>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmax_float_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_param_0];
-; CHECK-NEXT:    max.f32 %r9, %r4, %r8;
-; CHECK-NEXT:    max.f32 %r10, %r2, %r6;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmax_float_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    max.f32 %r9, %r8, %r4;
+; CHECK-NEXT:    max.f32 %r10, %r6, %r2;
 ; CHECK-NEXT:    max.f32 %r11, %r10, %r9;
-; CHECK-NEXT:    max.f32 %r12, %r3, %r7;
-; CHECK-NEXT:    max.f32 %r13, %r1, %r5;
+; CHECK-NEXT:    max.f32 %r12, %r7, %r3;
+; CHECK-NEXT:    max.f32 %r13, %r5, %r1;
 ; CHECK-NEXT:    max.f32 %r14, %r13, %r12;
 ; CHECK-NEXT:    max.f32 %r15, %r14, %r11;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
@@ -427,15 +494,20 @@ define float @reduce_fmax_float_reassoc(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fmax_float_reassoc(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<16>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmax_float_reassoc_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_param_0];
-; CHECK-NEXT:    max.f32 %r9, %r4, %r8;
-; CHECK-NEXT:    max.f32 %r10, %r2, %r6;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmax_float_reassoc_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmax_float_reassoc_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    max.f32 %r9, %r8, %r4;
+; CHECK-NEXT:    max.f32 %r10, %r6, %r2;
 ; CHECK-NEXT:    max.f32 %r11, %r10, %r9;
-; CHECK-NEXT:    max.f32 %r12, %r3, %r7;
-; CHECK-NEXT:    max.f32 %r13, %r1, %r5;
+; CHECK-NEXT:    max.f32 %r12, %r7, %r3;
+; CHECK-NEXT:    max.f32 %r13, %r5, %r1;
 ; CHECK-NEXT:    max.f32 %r14, %r13, %r12;
 ; CHECK-NEXT:    max.f32 %r15, %r14, %r11;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
@@ -537,15 +609,20 @@ define float @reduce_fmin_float(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fmin_float(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<16>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmin_float_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_param_0];
-; CHECK-NEXT:    min.f32 %r9, %r4, %r8;
-; CHECK-NEXT:    min.f32 %r10, %r2, %r6;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmin_float_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    min.f32 %r9, %r8, %r4;
+; CHECK-NEXT:    min.f32 %r10, %r6, %r2;
 ; CHECK-NEXT:    min.f32 %r11, %r10, %r9;
-; CHECK-NEXT:    min.f32 %r12, %r3, %r7;
-; CHECK-NEXT:    min.f32 %r13, %r1, %r5;
+; CHECK-NEXT:    min.f32 %r12, %r7, %r3;
+; CHECK-NEXT:    min.f32 %r13, %r5, %r1;
 ; CHECK-NEXT:    min.f32 %r14, %r13, %r12;
 ; CHECK-NEXT:    min.f32 %r15, %r14, %r11;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
@@ -559,15 +636,20 @@ define float @reduce_fmin_float_reassoc(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fmin_float_reassoc(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<16>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmin_float_reassoc_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_param_0];
-; CHECK-NEXT:    min.f32 %r9, %r4, %r8;
-; CHECK-NEXT:    min.f32 %r10, %r2, %r6;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmin_float_reassoc_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmin_float_reassoc_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    min.f32 %r9, %r8, %r4;
+; CHECK-NEXT:    min.f32 %r10, %r6, %r2;
 ; CHECK-NEXT:    min.f32 %r11, %r10, %r9;
-; CHECK-NEXT:    min.f32 %r12, %r3, %r7;
-; CHECK-NEXT:    min.f32 %r13, %r1, %r5;
+; CHECK-NEXT:    min.f32 %r12, %r7, %r3;
+; CHECK-NEXT:    min.f32 %r13, %r5, %r1;
 ; CHECK-NEXT:    min.f32 %r14, %r13, %r12;
 ; CHECK-NEXT:    min.f32 %r15, %r14, %r11;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
@@ -669,15 +751,20 @@ define float @reduce_fmaximum_float(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fmaximum_float(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<16>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmaximum_float_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_param_0];
-; CHECK-NEXT:    max.NaN.f32 %r9, %r4, %r8;
-; CHECK-NEXT:    max.NaN.f32 %r10, %r2, %r6;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmaximum_float_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    max.NaN.f32 %r9, %r8, %r4;
+; CHECK-NEXT:    max.NaN.f32 %r10, %r6, %r2;
 ; CHECK-NEXT:    max.NaN.f32 %r11, %r10, %r9;
-; CHECK-NEXT:    max.NaN.f32 %r12, %r3, %r7;
-; CHECK-NEXT:    max.NaN.f32 %r13, %r1, %r5;
+; CHECK-NEXT:    max.NaN.f32 %r12, %r7, %r3;
+; CHECK-NEXT:    max.NaN.f32 %r13, %r5, %r1;
 ; CHECK-NEXT:    max.NaN.f32 %r14, %r13, %r12;
 ; CHECK-NEXT:    max.NaN.f32 %r15, %r14, %r11;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
@@ -691,15 +778,20 @@ define float @reduce_fmaximum_float_reassoc(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fmaximum_float_reassoc(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<16>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmaximum_float_reassoc_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_param_0];
-; CHECK-NEXT:    max.NaN.f32 %r9, %r4, %r8;
-; CHECK-NEXT:    max.NaN.f32 %r10, %r2, %r6;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fmaximum_float_reassoc_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fmaximum_float_reassoc_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    max.NaN.f32 %r9, %r8, %r4;
+; CHECK-NEXT:    max.NaN.f32 %r10, %r6, %r2;
 ; CHECK-NEXT:    max.NaN.f32 %r11, %r10, %r9;
-; CHECK-NEXT:    max.NaN.f32 %r12, %r3, %r7;
-; CHECK-NEXT:    max.NaN.f32 %r13, %r1, %r5;
+; CHECK-NEXT:    max.NaN.f32 %r12, %r7, %r3;
+; CHECK-NEXT:    max.NaN.f32 %r13, %r5, %r1;
 ; CHECK-NEXT:    max.NaN.f32 %r14, %r13, %r12;
 ; CHECK-NEXT:    max.NaN.f32 %r15, %r14, %r11;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
@@ -801,15 +893,20 @@ define float @reduce_fminimum_float(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fminimum_float(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<16>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fminimum_float_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_param_0];
-; CHECK-NEXT:    min.NaN.f32 %r9, %r4, %r8;
-; CHECK-NEXT:    min.NaN.f32 %r10, %r2, %r6;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fminimum_float_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    min.NaN.f32 %r9, %r8, %r4;
+; CHECK-NEXT:    min.NaN.f32 %r10, %r6, %r2;
 ; CHECK-NEXT:    min.NaN.f32 %r11, %r10, %r9;
-; CHECK-NEXT:    min.NaN.f32 %r12, %r3, %r7;
-; CHECK-NEXT:    min.NaN.f32 %r13, %r1, %r5;
+; CHECK-NEXT:    min.NaN.f32 %r12, %r7, %r3;
+; CHECK-NEXT:    min.NaN.f32 %r13, %r5, %r1;
 ; CHECK-NEXT:    min.NaN.f32 %r14, %r13, %r12;
 ; CHECK-NEXT:    min.NaN.f32 %r15, %r14, %r11;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
@@ -823,15 +920,20 @@ define float @reduce_fminimum_float_reassoc(<8 x float> %in) {
 ; CHECK-LABEL: reduce_fminimum_float_reassoc(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .b32 %r<16>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fminimum_float_reassoc_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_param_0];
-; CHECK-NEXT:    min.NaN.f32 %r9, %r4, %r8;
-; CHECK-NEXT:    min.NaN.f32 %r10, %r2, %r6;
+; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_fminimum_float_reassoc_param_0+16];
+; CHECK-NEXT:    mov.b64 %rd4, {%r3, %r4};
+; CHECK-NEXT:    mov.b64 %rd3, {%r1, %r2};
+; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_fminimum_float_reassoc_param_0];
+; CHECK-NEXT:    mov.b64 %rd2, {%r7, %r8};
+; CHECK-NEXT:    mov.b64 %rd1, {%r5, %r6};
+; CHECK-NEXT:    min.NaN.f32 %r9, %r8, %r4;
+; CHECK-NEXT:    min.NaN.f32 %r10, %r6, %r2;
 ; CHECK-NEXT:    min.NaN.f32 %r11, %r10, %r9;
-; CHECK-NEXT:    min.NaN.f32 %r12, %r3, %r7;
-; CHECK-NEXT:    min.NaN.f32 %r13, %r1, %r5;
+; CHECK-NEXT:    min.NaN.f32 %r12, %r7, %r3;
+; CHECK-NEXT:    min.NaN.f32 %r13, %r5, %r1;
 ; CHECK-NEXT:    min.NaN.f32 %r14, %r13, %r12;
 ; CHECK-NEXT:    min.NaN.f32 %r15, %r14, %r11;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
diff --git a/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll b/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll
index 8e4c77e76029c..8b48b039d0dd9 100644
--- a/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll
+++ b/llvm/test/CodeGen/NVPTX/unfold-masked-merge-vector-variablemask.ll
@@ -63,13 +63,13 @@ define <4 x i8> @out_v4i8(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwind {
 ; CHECK-NEXT:    .reg .b32 %r<8>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [out_v4i8_param_1];
-; CHECK-NEXT:    ld.param.b32 %r2, [out_v4i8_param_0];
-; CHECK-NEXT:    ld.param.b32 %r3, [out_v4i8_param_2];
-; CHECK-NEXT:    and.b32 %r4, %r2, %r3;
-; CHECK-NEXT:    xor.b32 %r5, %r3, -1;
-; CHECK-NEXT:    and.b32 %r6, %r1, %r5;
-; CHECK-NEXT:    or.b32 %r7, %r4, %r6;
+; CHECK-NEXT:    ld.param.b32 %r1, [out_v4i8_param_0];
+; CHECK-NEXT:    ld.param.b32 %r2, [out_v4i8_param_2];
+; CHECK-NEXT:    and.b32 %r3, %r1, %r2;
+; CHECK-NEXT:    ld.param.b32 %r4, [out_v4i8_param_1];
+; CHECK-NEXT:    xor.b32 %r5, %r2, -1;
+; CHECK-NEXT:    and.b32 %r6, %r4, %r5;
+; CHECK-NEXT:    or.b32 %r7, %r3, %r6;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r7;
 ; CHECK-NEXT:    ret;
   %mx = and <4 x i8> %x, %mask
@@ -85,13 +85,13 @@ define <4 x i8> @out_v4i8_undef(<4 x i8> %x, <4 x i8> %y, <4 x i8> %mask) nounwi
 ; CHECK-NEXT:    .reg .b32 %r<8>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [out_v4i8_undef_param_1];
-; CHECK-NEXT:    ld.param.b32 %r2, [out_v4i8_undef_param_0];
-; CHECK-NEXT:    ld.param.b32 %r3, [out_v4i8_undef_param_2];
-; CHECK-NEXT:    and.b32 %r4, %r2, %r3;
-; CHECK-NEXT:    xor.b32 %r5, %r3, -16711681;
-; CHECK-NEXT:    and.b32 %r6, %r1, %r5;
-; CHECK-NEXT:    or.b32 %r7, %r4, %r6;
+; CHECK-NEXT:    ld.param.b32 %r1, [out_v4i8_undef_param_0];
+; CHECK-NEXT:    ld.param.b32 %r2, [out_v4i8_undef_param_2];
+; CHECK-NEXT:    and.b32 %r3, %r1, %r2;
+; CHECK-NEXT:    ld.param.b32 %r4, [out_v4i8_undef_param_1];
+; CHECK-NEXT:    xor.b32 %r5, %r2, -16711681;
+; CHECK-NEXT:    and.b32 %r6, %r4, %r5;
+; CHECK-NEXT:    or.b32 %r7, %r3, %r6;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r7;
 ; CHECK-NEXT:    ret;
   %mx = and <4 x i8> %x, %mask
@@ -107,13 +107,13 @@ define <2 x i16> @out_v2i16(<2 x i16> %x, <2 x i16> %y, <2 x i16> %mask) nounwin
 ; CHECK-NEXT:    .reg .b32 %r<8>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b32 %r1, [out_v2i16_param_1];
-; CHECK-NEXT:    ld.param.b32 %r2, [out_v2i16_param_0];
-; CHECK-NEXT:    ld.param.b32 %r3, [out_v2i16_param_2];
-; CHECK-NEXT:    and.b32 %r4, %r2, %r3;
-; CHECK-NEXT:    xor.b32 %r5, %r3, -1;
-; CHECK-NEXT:    and.b32 %r6, %r1, %r5;
-; CHECK-NEXT:    or.b32 %r7, %r4, %r6;
+; CHECK-NEXT:    ld.param.b32 %r1, [out_v2i16_param_0];
+; CHECK-NEXT:    ld.param.b32 %r2, [out_v2i16_param_2];
+; CHECK-NEXT:    and.b32 %r3, %r1, %r2;
+; CHECK-NEXT:    ld.param.b32 %r4, [out_v2i16_param_1];
+; CHECK-NEXT:    xor.b32 %r5, %r2, -1;
+; CHECK-NEXT:    and.b32 %r6, %r4, %r5;
+; CHECK-NEXT:    or.b32 %r7, %r3, %r6;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r7;
 ; CHECK-NEXT:    ret;
   %mx = and <2 x i16> %x, %mask
@@ -178,20 +178,27 @@ define <8 x i8> @out_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind {
 define <4 x i16> @out_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind {
 ; CHECK-LABEL: out_v4i16(
 ; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<13>;
 ; CHECK-NEXT:    .reg .b32 %r<15>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [out_v4i16_param_0];
-; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [out_v4i16_param_2];
-; CHECK-NEXT:    and.b32 %r5, %r1, %r3;
-; CHECK-NEXT:    and.b32 %r6, %r2, %r4;
-; CHECK-NEXT:    ld.param.v2.b32 {%r7, %r8}, [out_v4i16_param_1];
-; CHECK-NEXT:    xor.b32 %r9, %r4, -1;
+; CHECK-NEXT:    ld.param.v4.b16 {%rs1, %rs2, %rs3, %rs4}, [out_v4i16_param_0];
+; CHECK-NEXT:    mov.b32 %r1, {%rs3, %rs4};
+; CHECK-NEXT:    mov.b32 %r2, {%rs1, %rs2};
+; CHECK-NEXT:    ld.param.v4.b16 {%rs5, %rs6, %rs7, %rs8}, [out_v4i16_param_2];
+; CHECK-NEXT:    mov.b32 %r3, {%rs5, %rs6};
+; CHECK-NEXT:    and.b32 %r4, %r2, %r3;
+; CHECK-NEXT:    mov.b32 %r5, {%rs7, %rs8};
+; CHECK-NEXT:    and.b32 %r6, %r1, %r5;
+; CHECK-NEXT:    ld.param.v4.b16 {%rs9, %rs10, %rs11, %rs12}, [out_v4i16_param_1];
+; CHECK-NEXT:    mov.b32 %r7, {%rs11, %rs12};
+; CHECK-NEXT:    mov.b32 %r8, {%rs9, %rs10};
+; CHECK-NEXT:    xor.b32 %r9, %r5, -1;
 ; CHECK-NEXT:    xor.b32 %r10, %r3, -1;
-; CHECK-NEXT:    and.b32 %r11, %r7, %r10;
-; CHECK-NEXT:    and.b32 %r12, %r8, %r9;
+; CHECK-NEXT:    and.b32 %r11, %r8, %r10;
+; CHECK-NEXT:    and.b32 %r12, %r7, %r9;
 ; CHECK-NEXT:    or.b32 %r13, %r6, %r12;
-; CHECK-NEXT:    or.b32 %r14, %r5, %r11;
+; CHECK-NEXT:    or.b32 %r14, %r4, %r11;
 ; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r14, %r13};
 ; CHECK-NEXT:    ret;
   %mx = and <4 x i16> %x, %mask
@@ -204,20 +211,27 @@ define <4 x i16> @out_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwin
 define <4 x i16> @out_v4i16_undef(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind {
 ; CHECK-LABEL: out_v4i16_undef(
 ; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<13>;
 ; CHECK-NEXT:    .reg .b32 %r<15>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [out_v4i16_undef_param_0];
-; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [out_v4i16_undef_param_2];
-; CHECK-NEXT:    and.b32 %r5, %r1, %r3;
-; CHECK-NEXT:    and.b32 %r6, %r2, %r4;
-; CHECK-NEXT:    ld.param.v2.b32 {%r7, %r8}, [out_v4i16_undef_param_1];
-; CHECK-NEXT:    xor.b32 %r9, %r4, -65536;
+; CHECK-NEXT:    ld.param.v4.b16 {%rs1, %rs2, %rs3, %rs4}, [out_v4i16_undef_param_0];
+; CHECK-NEXT:    mov.b32 %r1, {%rs3, %rs4};
+; CHECK-NEXT:    mov.b32 %r2, {%rs1, %rs2};
+; CHECK-NEXT:    ld.param.v4.b16 {%rs5, %rs6, %rs7, %rs8}, [out_v4i16_undef_param_2];
+; CHECK-NEXT:    mov.b32 %r3, {%rs5, %rs6};
+; CHECK-NEXT:    and.b32 %r4, %r2, %r3;
+; CHECK-NEXT:    mov.b32 %r5, {%rs7, %rs8};
+; CHECK-NEXT:    and.b32 %r6, %r1, %r5;
+; CHECK-NEXT:    ld.param.v4.b16 {%rs9, %rs10, %rs11, %rs12}, [out_v4i16_undef_param_1];
+; CHECK-NEXT:    mov.b32 %r7, {%rs11, %rs12};
+; CHECK-NEXT:    mov.b32 %r8, {%rs9, %rs10};
+; CHECK-NEXT:    xor.b32 %r9, %r5, -65536;
 ; CHECK-NEXT:    xor.b32 %r10, %r3, -1;
-; CHECK-NEXT:    and.b32 %r11, %r7, %r10;
-; CHECK-NEXT:    and.b32 %r12, %r8, %r9;
+; CHECK-NEXT:    and.b32 %r11, %r8, %r10;
+; CHECK-NEXT:    and.b32 %r12, %r7, %r9;
 ; CHECK-NEXT:    or.b32 %r13, %r6, %r12;
-; CHECK-NEXT:    or.b32 %r14, %r5, %r11;
+; CHECK-NEXT:    or.b32 %r14, %r4, %r11;
 ; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r14, %r13};
 ; CHECK-NEXT:    ret;
   %mx = and <4 x i16> %x, %mask
@@ -566,14 +580,14 @@ define <8 x i8> @in_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind {
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [in_v8i8_param_0];
 ; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [in_v8i8_param_1];
-; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [in_v8i8_param_2];
-; CHECK-NEXT:    xor.b32 %r7, %r2, %r4;
-; CHECK-NEXT:    and.b32 %r8, %r7, %r6;
-; CHECK-NEXT:    xor.b32 %r9, %r8, %r4;
-; CHECK-NEXT:    xor.b32 %r10, %r1, %r3;
-; CHECK-NEXT:    and.b32 %r11, %r10, %r5;
-; CHECK-NEXT:    xor.b32 %r12, %r11, %r3;
-; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r12, %r9};
+; CHECK-NEXT:    xor.b32 %r5, %r2, %r4;
+; CHECK-NEXT:    xor.b32 %r6, %r1, %r3;
+; CHECK-NEXT:    ld.param.v2.b32 {%r7, %r8}, [in_v8i8_param_2];
+; CHECK-NEXT:    and.b32 %r9, %r6, %r7;
+; CHECK-NEXT:    and.b32 %r10, %r5, %r8;
+; CHECK-NEXT:    xor.b32 %r11, %r10, %r4;
+; CHECK-NEXT:    xor.b32 %r12, %r9, %r3;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r12, %r11};
 ; CHECK-NEXT:    ret;
   %n0 = xor <8 x i8> %x, %y
   %n1 = and <8 x i8> %n0, %mask
@@ -584,19 +598,26 @@ define <8 x i8> @in_v8i8(<8 x i8> %x, <8 x i8> %y, <8 x i8> %mask) nounwind {
 define <4 x i16> @in_v4i16(<4 x i16> %x, <4 x i16> %y, <4 x i16> %mask) nounwind {
 ; CHECK-LABEL: in_v4i16(
 ; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<13>;
 ; CHECK-NEXT:    .reg .b32 %r<13>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [in_v4i16_param_0];
-; CHECK-NEXT:    ld.param.v2.b32 {%r3, %r4}, [in_v4i16_param_1];
-; CHECK-NEXT:    ld.param.v2.b32 {%r5, %r6}, [in_v4i16_param_2];
-; CHECK-NEXT:    xor.b32 %r7, %r2, %r4;
-; CHECK-NEXT:    and.b32 %r8, %r7, %r6;
-; CHECK-NEXT:    xor.b32 %r9, %r8, %r4;
-; CHECK-NEXT:    xor.b32 %r10, %r1, %r3;
-; CHECK-NEXT:    and.b32 %r11, %r10, %r5;
-; CHECK-NEXT:    xor.b32 %r12, %r11, %r3;
-; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r12, %r9};
+; CHECK-NEXT:    ld.param.v4.b16 {%rs1, %rs2, %rs3, %rs4}, [in_v4i16_param_0];
+; CHECK-NEXT:    mov.b32 %r1, {%rs1, %rs2};
+; CHECK-NEXT:    mov.b32 %r2, {%rs3, %rs4};
+; CHECK-NEXT:    ld.param.v4.b16 {%rs5, %rs6, %rs7, %rs8}, [in_v4i16_param_1];
+; CHECK-NEXT:    mov.b32 %r3, {%rs7, %rs8};
+; CHECK-NEXT:    xor.b32 %r4, %r2, %r3;
+; CHECK-NEXT:    mov.b32 %r5, {%rs5, %rs6};
+; CHECK-NEXT:    xor.b32 %r6, %r1, %r5;
+; CHECK-NEXT:    ld.param.v4.b16 {%rs9, %rs10, %rs11, %rs12}, [in_v4i16_param_2];
+; CHECK-NEXT:    mov.b32 %r7, {%rs9, %rs10};
+; CHECK-NEXT:    and.b32 %r8, %r6, %r7;
+; CHECK-NEXT:    mov.b32 %r9, {%rs11, %rs12};
+; CHECK-NEXT:    and.b32 %r10, %r4, %r9;
+; CHECK-NEXT:    xor.b32 %r11, %r10, %r3;
+; CHECK-NEXT:    xor.b32 %r12, %r8, %r5;
+; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r12, %r11};
 ; CHECK-NEXT:    ret;
   %n0 = xor <4 x i16> %x, %y
   %n1 = and <4 x i16> %n0, %mask
diff --git a/llvm/test/CodeGen/NVPTX/vector-loads.ll b/llvm/test/CodeGen/NVPTX/vector-loads.ll
index 88ff59407a143..e04bf8ca34544 100644
--- a/llvm/test/CodeGen/NVPTX/vector-loads.ll
+++ b/llvm/test/CodeGen/NVPTX/vector-loads.ll
@@ -101,18 +101,18 @@ define void @foo_complex(ptr nocapture readonly align 16 dereferenceable(1342177
 define void @extv8f16_global_a16(ptr addrspace(1) noalias readonly align 16 %dst, ptr addrspace(1) noalias readonly align 16 %src) #0 {
 ; CHECK: ld.global.v4.b32 {%r
   %v = load <8 x half>, ptr addrspace(1) %src, align 16
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: mov.b32 {%rs
+; CHECK-DAG: mov.b32 {%rs
+; CHECK-DAG: mov.b32 {%rs
+; CHECK-DAG: mov.b32 {%rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
   %ext = fpext <8 x half> %v to <8 x float>
 ; CHECK: st.global.v4.b32
 ; CHECK: st.global.v4.b32
@@ -151,18 +151,18 @@ define void @extv8f16_global_a4(ptr addrspace(1) noalias readonly align 16 %dst,
 define void @extv8f16_generic_a16(ptr noalias readonly align 16 %dst, ptr noalias readonly align 16 %src) #0 {
 ; CHECK: ld.v4.b32 {%r
   %v = load <8 x half>, ptr %src, align 16
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: mov.b32 {%rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
-; CHECK: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: mov.b32 {%rs
+; CHECK-DAG: mov.b32 {%rs
+; CHECK-DAG: mov.b32 {%rs
+; CHECK-DAG: mov.b32 {%rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
+; CHECK-DAG: cvt.f32.f16 %r{{.*}}, %rs
   %ext = fpext <8 x half> %v to <8 x float>
 ; CHECK: st.v4.b32
 ; CHECK: st.v4.b32



More information about the llvm-commits mailing list