[llvm] [NVPTX] Make i16x2 a native type and add supported vec instructions (PR #65432)

via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 6 11:38:58 PDT 2023


https://github.com/ThomasRaoux updated https://github.com/llvm/llvm-project/pull/65432:

From 21aba599aa7b43cd44aa9be9253941747a70ec8b Mon Sep 17 00:00:00 2001
From: Thomas Raoux <thomas.raoux at openai.com>
Date: Tue, 5 Sep 2023 18:02:05 -0700
Subject: [PATCH 1/2] [NVPTX] Make i16x2 a native type and add support for
 instructions operating on it

On sm_90 some instructions now support i16x2, which allows the hardware to
execute add, min, and max instructions more efficiently.

Supporting this requires making i16x2 a native type in the backend. This patch
makes the necessary changes to treat i16x2 as a native type and adds support
for the instructions that natively operate on it.

This caused a negative NVPTX SLP vectorizer test to start passing. The test has
been changed to a positive one, since the IR is now correctly vectorized.
---
 llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp   |  24 +-
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp   | 133 ++++-
 llvm/lib/Target/NVPTX/NVPTXInstrInfo.td       | 139 +++--
 llvm/lib/Target/NVPTX/NVPTXIntrinsics.td      | 363 ++++++------
 llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td    |   2 +-
 llvm/test/CodeGen/NVPTX/dag-cse.ll            |   4 +-
 llvm/test/CodeGen/NVPTX/f16x2-instructions.ll |  10 +-
 llvm/test/CodeGen/NVPTX/i16x2-instructions.ll | 533 ++++++++++++++++++
 .../NVPTX/load-with-non-coherent-cache.ll     |   4 +-
 llvm/test/CodeGen/NVPTX/param-load-store.ll   |  15 +-
 llvm/test/CodeGen/NVPTX/vec-param-load.ll     |  12 +-
 ...non-vectorizable-intrinsic-inseltpoison.ll |  57 --
 .../NVPTX/non-vectorizable-intrinsic.ll       |  57 --
 .../NVPTX/vectorizable-intrinsic.ll           |  42 ++
 14 files changed, 985 insertions(+), 410 deletions(-)
 create mode 100644 llvm/test/CodeGen/NVPTX/i16x2-instructions.ll
 delete mode 100644 llvm/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic-inseltpoison.ll
 delete mode 100644 llvm/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic.ll
 create mode 100644 llvm/test/Transforms/SLPVectorizer/NVPTX/vectorizable-intrinsic.ll
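For illustration, the headline case this patch enables (a sketch; the PTX and
register names are approximate, not copied from the new tests):

define <2 x i16> @add_v2i16(<2 x i16> %a, <2 x i16> %b) {
  ; with this patch, <2 x i16> lives in a single 32-bit register
  %r = add <2 x i16> %a, %b
  ret <2 x i16> %r
}
; expected lowering on sm_90 / PTX 8.0 (sketch):
;   add.s16x2 %r3, %r1, %r2;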

diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index 99a7fdb9d1e21f9..0091acc456eb402 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -615,7 +615,7 @@ bool NVPTXDAGToDAGISel::tryEXTRACT_VECTOR_ELEMENT(SDNode *N) {
   // We only care about f16x2 as it's the only real vector type we
   // need to deal with.
   MVT VT = Vector.getSimpleValueType();
-  if (!(VT == MVT::v2f16 || VT == MVT::v2bf16))
+  if (!(VT == MVT::v2f16 || VT == MVT::v2bf16 || VT == MVT::v2i16))
     return false;
   // Find and record all uses of this vector that extract element 0 or 1.
   SmallVector<SDNode *, 4> E0, E1;
@@ -828,6 +828,7 @@ pickOpcodeForVT(MVT::SimpleValueType VT, unsigned Opcode_i8,
     return Opcode_i16;
   case MVT::v2f16:
   case MVT::v2bf16:
+  case MVT::v2i16:
     return Opcode_i32;
   case MVT::f32:
     return Opcode_f32;
@@ -909,9 +910,10 @@ bool NVPTXDAGToDAGISel::tryLoad(SDNode *N) {
   // Vector Setting
   unsigned vecType = NVPTX::PTXLdStInstCode::Scalar;
   if (SimpleVT.isVector()) {
-    assert((LoadedVT == MVT::v2f16 || LoadedVT == MVT::v2bf16) &&
+    assert((LoadedVT == MVT::v2f16 || LoadedVT == MVT::v2bf16 ||
+            LoadedVT == MVT::v2i16) &&
            "Unexpected vector type");
-    // v2f16/v2bf16 is loaded using ld.b32
+    // v2f16/v2bf16/v2i16 is loaded using ld.b32
     fromTypeWidth = 32;
   }
 
@@ -1064,7 +1066,7 @@ bool NVPTXDAGToDAGISel::tryLoadVector(SDNode *N) {
   // v8f16 is a special case. PTX doesn't have ld.v8.f16
   // instruction. Instead, we split the vector into v2f16 chunks and
   // load them with ld.v4.b32.
-  if (EltVT == MVT::v2f16 || EltVT == MVT::v2bf16) {
+  if (EltVT == MVT::v2f16 || EltVT == MVT::v2bf16 || EltVT == MVT::v2i16) {
     assert(N->getOpcode() == NVPTXISD::LoadV4 && "Unexpected load opcode.");
     EltVT = MVT::i32;
     FromType = NVPTX::PTXLdStInstCode::Untyped;
@@ -1262,10 +1264,11 @@ bool NVPTXDAGToDAGISel::tryLDGLDU(SDNode *N) {
     EltVT = EltVT.getVectorElementType();
     // vectors of f16 are loaded/stored as multiples of v2f16 elements.
     if ((EltVT == MVT::f16 && N->getValueType(0) == MVT::v2f16) ||
-        (EltVT == MVT::bf16 && N->getValueType(0) == MVT::v2bf16)) {
-          assert(NumElts % 2 == 0 && "Vector must have even number of elements");
-          EltVT = N->getValueType(0);
-          NumElts /= 2;
+        (EltVT == MVT::bf16 && N->getValueType(0) == MVT::v2bf16) ||
+        (EltVT == MVT::i16 && N->getValueType(0) == MVT::v2i16)) {
+      assert(NumElts % 2 == 0 && "Vector must have even number of elements");
+      EltVT = N->getValueType(0);
+      NumElts /= 2;
     }
   }
 
@@ -1678,7 +1681,8 @@ bool NVPTXDAGToDAGISel::tryStore(SDNode *N) {
   MVT ScalarVT = SimpleVT.getScalarType();
   unsigned toTypeWidth = ScalarVT.getSizeInBits();
   if (SimpleVT.isVector()) {
-    assert((StoreVT == MVT::v2f16 || StoreVT == MVT::v2bf16) &&
+    assert((StoreVT == MVT::v2f16 || StoreVT == MVT::v2bf16 ||
+            StoreVT == MVT::v2i16) &&
            "Unexpected vector type");
     // v2f16 is stored using st.b32
     toTypeWidth = 32;
@@ -1847,7 +1851,7 @@ bool NVPTXDAGToDAGISel::tryStoreVector(SDNode *N) {
   // v8f16 is a special case. PTX doesn't have st.v8.f16
   // instruction. Instead, we split the vector into v2f16 chunks and
   // store them with st.v4.b32.
-  if (EltVT == MVT::v2f16 || EltVT == MVT::v2bf16) {
+  if (EltVT == MVT::v2f16 || EltVT == MVT::v2bf16 || EltVT == MVT::v2i16) {
     assert(N->getOpcode() == NVPTXISD::StoreV4 && "Unexpected load opcode.");
     EltVT = MVT::i32;
     ToType = NVPTX::PTXLdStInstCode::Untyped;
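To illustrate the ld.b32/st.b32 path above, a minimal sketch (alignment and
register names assumed, not taken from the tests): the whole v2i16 value moves
through a single b32 register.

define void @copy_v2i16(ptr %in, ptr %out) {
  %v = load <2 x i16>, ptr %in, align 4
  store <2 x i16> %v, ptr %out, align 4
  ret void
}
; sketch of the expected PTX:
;   ld.b32 %r1, [%rd1];
;   st.b32 [%rd2], %r1;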
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index f12f4fe3af33f06..3e36a72e1a53b52 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -133,6 +133,7 @@ static bool IsPTXVectorType(MVT VT) {
   case MVT::v4i8:
   case MVT::v2i16:
   case MVT::v4i16:
+  case MVT::v8i16: // <4 x i16x2>
   case MVT::v2i32:
   case MVT::v4i32:
   case MVT::v2i64:
@@ -149,12 +150,13 @@ static bool IsPTXVectorType(MVT VT) {
   }
 }
 
-static bool Isv2f16Orv2bf16Type(EVT VT) {
-  return (VT == MVT::v2f16 || VT == MVT::v2bf16);
+static bool Isv2f16Orv2bf16Orv2i16Type(EVT VT) {
+  return (VT == MVT::v2f16 || VT == MVT::v2bf16 || VT == MVT::v2i16);
 }
 
-static bool Isf16Orbf16Type(MVT VT) {
-  return (VT.SimpleTy == MVT::f16 || VT.SimpleTy == MVT::bf16);
+static bool Is16bitsType(MVT VT) {
+  return (VT.SimpleTy == MVT::f16 || VT.SimpleTy == MVT::bf16 ||
+          VT.SimpleTy == MVT::i16);
 }
 
 /// ComputePTXValueVTs - For the given Type \p Ty, returns the set of primitive
@@ -207,8 +209,13 @@ static void ComputePTXValueVTs(const TargetLowering &TLI, const DataLayout &DL,
       // Vectors with an even number of f16 elements will be passed to
       // us as an array of v2f16/v2bf16 elements. We must match this so we
       // stay in sync with Ins/Outs.
-      if ((Isf16Orbf16Type(EltVT.getSimpleVT())) && NumElts % 2 == 0) {
-        EltVT = EltVT == MVT::f16 ? MVT::v2f16 : MVT::v2bf16;
+      if ((Is16bitsType(EltVT.getSimpleVT())) && NumElts % 2 == 0) {
+        if (EltVT == MVT::f16)
+          EltVT = MVT::v2f16;
+        else if (EltVT == MVT::bf16)
+          EltVT = MVT::v2bf16;
+        else if (EltVT == MVT::i16)
+          EltVT = MVT::v2i16;
         NumElts /= 2;
       }
       for (unsigned j = 0; j != NumElts; ++j) {
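As with f16/bf16, a vector with an even number of i16 elements is now
decomposed into v2i16 pieces when passed as a parameter. A hypothetical sketch:

define <4 x i16> @pass_v4i16(<4 x i16> %a) {
  ret <4 x i16> %a
}
; sketch: the <4 x i16> argument is handled as two v2i16 (b32) values,
; e.g. loaded with a single ld.param.v2.b32 rather than four 16-bit loads.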
@@ -427,8 +434,26 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
         Op, VT, IsOpSupported ? Action : NoBF16Action);
   };
 
+  auto setI16x2OperationAction = [&](unsigned Op, MVT VT, LegalizeAction Action,
+                                     LegalizeAction NoI16x2Action) {
+    bool IsOpSupported = false;
+    // instructions are available on sm_90 only
+    switch (Op) {
+    case ISD::ADD:
+    case ISD::SMAX:
+    case ISD::SMIN:
+    case ISD::UMIN:
+    case ISD::UMAX:
+    case ISD::SUB:
+      IsOpSupported = STI.getSmVersion() >= 90 && STI.getPTXVersion() >= 80;
+      break;
+    }
+    setOperationAction(Op, VT, IsOpSupported ? Action : NoI16x2Action);
+  };
+
   addRegisterClass(MVT::i1, &NVPTX::Int1RegsRegClass);
   addRegisterClass(MVT::i16, &NVPTX::Int16RegsRegClass);
+  addRegisterClass(MVT::v2i16, &NVPTX::Int32RegsRegClass);
   addRegisterClass(MVT::i32, &NVPTX::Int32RegsRegClass);
   addRegisterClass(MVT::i64, &NVPTX::Int64RegsRegClass);
   addRegisterClass(MVT::f32, &NVPTX::Float32RegsRegClass);
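The lambda above gates the packed forms on sm_90 with PTX 8.0; on older targets
the same IR still compiles, the operation is just expanded. A sketch (the llc
invocations are illustrative):

; llc -march=nvptx64 -mcpu=sm_90 -mattr=+ptx80  ->  sub.s16x2 %r3, %r1, %r2;
; llc -march=nvptx64 -mcpu=sm_80                ->  expanded into scalar sub.s16 ops
define <2 x i16> @sub_v2i16(<2 x i16> %a, <2 x i16> %b) {
  %r = sub <2 x i16> %a, %b
  ret <2 x i16> %r
}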
@@ -459,9 +484,17 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
 
   setBF16OperationAction(ISD::SETCC, MVT::bf16, Legal, Promote);
   setBF16OperationAction(ISD::SETCC, MVT::v2bf16, Legal, Expand);
+
+  // Conversion to/from i16/i16x2 is always legal.
+  setOperationAction(ISD::BUILD_VECTOR, MVT::v2i16, Custom);
+  setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i16, Custom);
+  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i16, Expand);
+  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i16, Expand);
+
   // Operations not directly supported by NVPTX.
-  for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
-                 MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::i32, MVT::i64}) {
+  for (MVT VT :
+       {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32, MVT::f64,
+        MVT::i1, MVT::i8, MVT::i16, MVT::v2i16, MVT::i32, MVT::i64}) {
     setOperationAction(ISD::SELECT_CC, VT, Expand);
     setOperationAction(ISD::BR_CC, VT, Expand);
   }
@@ -473,6 +506,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
   setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Expand);
 
   setOperationAction(ISD::SHL_PARTS, MVT::i32  , Custom);
   setOperationAction(ISD::SRA_PARTS, MVT::i32  , Custom);
@@ -493,10 +527,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   setOperationAction(ISD::ROTR, MVT::i32, Legal);
 
   setOperationAction(ISD::ROTL, MVT::i16, Expand);
+  setOperationAction(ISD::ROTL, MVT::v2i16, Expand);
   setOperationAction(ISD::ROTR, MVT::i16, Expand);
+  setOperationAction(ISD::ROTR, MVT::v2i16, Expand);
   setOperationAction(ISD::ROTL, MVT::i8, Expand);
   setOperationAction(ISD::ROTR, MVT::i8, Expand);
   setOperationAction(ISD::BSWAP, MVT::i16, Expand);
+  setOperationAction(ISD::BSWAP, MVT::v2i16, Expand);
   setOperationAction(ISD::BSWAP, MVT::i32, Expand);
   setOperationAction(ISD::BSWAP, MVT::i64, Expand);
 
@@ -584,6 +621,22 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
     setOperationAction(ISD::CTLZ, Ty, Legal);
   }
 
+  setI16x2OperationAction(ISD::ABS, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::SMIN, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::SMAX, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::UMIN, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::UMAX, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::CTPOP, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::CTLZ, MVT::v2i16, Legal, Expand);
+
+  setI16x2OperationAction(ISD::ADD, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::SUB, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::AND, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::MUL, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::SHL, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::SREM, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::UREM, MVT::v2i16, Legal, Expand);
+
   setOperationAction(ISD::ADDC, MVT::i32, Legal);
   setOperationAction(ISD::ADDE, MVT::i32, Legal);
   setOperationAction(ISD::SUBC, MVT::i32, Legal);
@@ -596,6 +649,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   }
 
   setOperationAction(ISD::CTTZ, MVT::i16, Expand);
+  setOperationAction(ISD::CTTZ, MVT::v2i16, Expand);
   setOperationAction(ISD::CTTZ, MVT::i32, Expand);
   setOperationAction(ISD::CTTZ, MVT::i64, Expand);
 
@@ -1318,7 +1372,7 @@ NVPTXTargetLowering::getPreferredVectorAction(MVT VT) const {
   if (!VT.isScalableVector() && VT.getVectorNumElements() != 1 &&
       VT.getScalarType() == MVT::i1)
     return TypeSplitVector;
-  if (Isv2f16Orv2bf16Type(VT))
+  if (Isv2f16Orv2bf16Orv2i16Type(VT))
     return TypeLegal;
   return TargetLoweringBase::getPreferredVectorAction(VT);
 }
@@ -2098,15 +2152,31 @@ NVPTXTargetLowering::LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const {
 // generates good SASS in both cases.
 SDValue NVPTXTargetLowering::LowerBUILD_VECTOR(SDValue Op,
                                                SelectionDAG &DAG) const {
-  if (!(Isv2f16Orv2bf16Type(Op->getValueType(0)) &&
-        isa<ConstantFPSDNode>(Op->getOperand(0)) &&
-        isa<ConstantFPSDNode>(Op->getOperand(1))))
+  EVT VT = Op->getValueType(0);
+  if (!(Isv2f16Orv2bf16Orv2i16Type(VT)))
     return Op;
+  APInt E0;
+  APInt E1;
+  if (VT == MVT::v2f16 || VT == MVT::v2bf16) {
+    if (!(isa<ConstantFPSDNode>(Op->getOperand(0)) &&
+          isa<ConstantFPSDNode>(Op->getOperand(1))))
+      return Op;
+
+    E0 = cast<ConstantFPSDNode>(Op->getOperand(0))
+             ->getValueAPF()
+             .bitcastToAPInt();
+    E1 = cast<ConstantFPSDNode>(Op->getOperand(1))
+             ->getValueAPF()
+             .bitcastToAPInt();
+  } else {
+    assert(VT == MVT::v2i16);
+    if (!(isa<ConstantSDNode>(Op->getOperand(0)) &&
+          isa<ConstantSDNode>(Op->getOperand(1))))
+      return Op;
 
-  APInt E0 =
-      cast<ConstantFPSDNode>(Op->getOperand(0))->getValueAPF().bitcastToAPInt();
-  APInt E1 =
-      cast<ConstantFPSDNode>(Op->getOperand(1))->getValueAPF().bitcastToAPInt();
+    E0 = cast<ConstantSDNode>(Op->getOperand(0))->getAPIntValue();
+    E1 = cast<ConstantSDNode>(Op->getOperand(1))->getAPIntValue();
+  }
   SDValue Const =
       DAG.getConstant(E1.zext(32).shl(16) | E0.zext(32), SDLoc(Op), MVT::i32);
   return DAG.getNode(ISD::BITCAST, SDLoc(Op), Op->getValueType(0), Const);
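With the change above, a constant v2i16 build_vector is folded into one i32
immediate via (E1 << 16) | E0, just like the f16x2 case. For example (a sketch):

define <2 x i16> @const_v2i16() {
  ret <2 x i16> <i16 1, i16 2>
}
; (2 << 16) | 1 = 0x00020001 = 131073, so the vector materializes as
;   mov.u32 %r1, 131073;   (sketch)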
@@ -2122,7 +2192,8 @@ SDValue NVPTXTargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
   // Extract individual elements and select one of them.
   SDValue Vector = Op->getOperand(0);
   EVT VectorVT = Vector.getValueType();
-  assert(VectorVT == MVT::v2f16 && "Unexpected vector type.");
+  assert((VectorVT == MVT::v2f16 || VectorVT == MVT::v2i16) &&
+         "Unexpected vector type.");
   EVT EltVT = VectorVT.getVectorElementType();
 
   SDLoc dl(Op.getNode());
@@ -2470,7 +2541,7 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
 
   // v2f16 is legal, so we can't rely on legalizer to handle unaligned
   // loads and have to handle it here.
-  if (Isv2f16Orv2bf16Type(Op.getValueType())) {
+  if (Isv2f16Orv2bf16Orv2i16Type(Op.getValueType())) {
     LoadSDNode *Load = cast<LoadSDNode>(Op);
     EVT MemVT = Load->getMemoryVT();
     if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
@@ -2515,13 +2586,13 @@ SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
 
   // v2f16 is legal, so we can't rely on legalizer to handle unaligned
   // stores and have to handle it here.
-  if (Isv2f16Orv2bf16Type(VT) &&
+  if (Isv2f16Orv2bf16Orv2i16Type(VT) &&
       !allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                       VT, *Store->getMemOperand()))
     return expandUnalignedStore(Store, DAG);
 
-  // v2f16 and v2bf16 don't need special handling.
-  if (VT == MVT::v2f16 || VT == MVT::v2bf16)
+  // v2f16, v2bf16 and v2i16 don't need special handling.
+  if (VT == MVT::v2f16 || VT == MVT::v2bf16 || VT == MVT::v2i16)
     return SDValue();
 
   if (VT.isVector())
@@ -2562,6 +2633,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
     case MVT::v4f32:
     case MVT::v8f16: // <4 x f16x2>
     case MVT::v8bf16: // <4 x bf16x2>
+    case MVT::v8i16:  // <4 x i16x2>
       // This is a "native" vector type
       break;
     }
@@ -2606,8 +2678,7 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
       // v8f16 is a special case. PTX doesn't have st.v8.f16
       // instruction. Instead, we split the vector into v2f16 chunks and
       // store them with st.v4.b32.
-      assert(Isf16Orbf16Type(EltVT.getSimpleVT()) &&
-             "Wrong type for the vector.");
+      assert(Is16bitsType(EltVT.getSimpleVT()) && "Wrong type for the vector.");
       Opcode = NVPTXISD::StoreV4;
       StoreF16x2 = true;
       break;
@@ -2793,7 +2864,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
           EVT LoadVT = EltVT;
           if (EltVT == MVT::i1)
             LoadVT = MVT::i8;
-          else if (Isv2f16Orv2bf16Type(EltVT))
+          else if (Isv2f16Orv2bf16Orv2i16Type(EltVT))
             // getLoad needs a vector type, but it can't handle
             // vectors which contain v2f16 or v2bf16 elements. So we must load
             // using i32 here and then bitcast back.
@@ -2819,7 +2890,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
             if (EltVT == MVT::i1)
               Elt = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Elt);
             // v2f16 was loaded as an i32. Now we must bitcast it back.
-            else if (Isv2f16Orv2bf16Type(EltVT))
+            else if (Isv2f16Orv2bf16Orv2i16Type(EltVT))
               Elt = DAG.getNode(ISD::BITCAST, dl, EltVT, Elt);
 
             // If a promoted integer type is used, truncate down to the original
@@ -5198,6 +5269,7 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
   case MVT::v4f16:
   case MVT::v4f32:
   case MVT::v8f16: // <4 x f16x2>
+  case MVT::v8i16: // <4 x i16x2>
     // This is a "native" vector type
     break;
   }
@@ -5250,11 +5322,16 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
     // v8f16 is a special case. PTX doesn't have ld.v8.f16
     // instruction. Instead, we split the vector into v2f16 chunks and
     // load them with ld.v4.b32.
-    assert(Isf16Orbf16Type(EltVT.getSimpleVT()) &&
-           "Unsupported v8 vector type.");
+    assert(Is16bitsType(EltVT.getSimpleVT()) && "Unsupported v8 vector type.");
     LoadF16x2 = true;
     Opcode = NVPTXISD::LoadV4;
-    EVT VVT = (EltVT == MVT::f16) ? MVT::v2f16 : MVT::v2bf16;
+    EVT VVT;
+    if (EltVT == MVT::f16)
+      VVT = MVT::v2f16;
+    else if (EltVT == MVT::bf16)
+      VVT = MVT::v2bf16;
+    else if (EltVT == MVT::i16)
+      VVT = MVT::v2i16;
     EVT ListVTs[] = {VVT, VVT, VVT, VVT, MVT::Other};
     LdResVTs = DAG.getVTList(ListVTs);
     break;
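So, mirroring v8f16, a v8i16 load is split into four v2i16 chunks carried by
one ld.v4.b32. A sketch:

define <8 x i16> @load_v8i16(ptr %p) {
  %v = load <8 x i16>, ptr %p, align 16
  ret <8 x i16> %v
}
; sketch: ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];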
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index b98f76ed4b38d95..3d5e84c2298ca66 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -165,6 +165,7 @@ class ValueToRegClass<ValueType T> {
    NVPTXRegClass ret = !cond(
      !eq(name, "i1"): Int1Regs,
      !eq(name, "i16"): Int16Regs,
+     !eq(name, "v2i16"): Int32Regs,
      !eq(name, "i32"): Int32Regs,
      !eq(name, "i64"): Int64Regs,
      !eq(name, "f16"): Int16Regs,
@@ -199,11 +200,11 @@ multiclass I3<string OpcStr, SDNode OpNode> {
   def i32rr :
     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
               !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
-              [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+              [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]>;
   def i32ri :
     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
               !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
-              [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+              [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), imm:$b))]>;
   def i16rr :
     NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
               !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
@@ -214,6 +215,12 @@ multiclass I3<string OpcStr, SDNode OpNode> {
               [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
 }
 
+class I16x2<string OpcStr, SDNode OpNode> :
+ NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
+              !strconcat(OpcStr, "16x2 \t$dst, $a, $b;"),
+              [(set Int32Regs:$dst, (OpNode (v2i16 Int32Regs:$a), (v2i16 Int32Regs:$b)))]>,
+              Requires<[hasPTX<80>, hasSM<90>]>;
+
 // Template for instructions which take 3 int args.  The instructions are
 // named "<OpcStr>.s32" (e.g. "addc.cc.s32").
 multiclass ADD_SUB_INT_CARRY<string OpcStr, SDNode OpNode> {
@@ -221,11 +228,11 @@ multiclass ADD_SUB_INT_CARRY<string OpcStr, SDNode OpNode> {
     def i32rr :
       NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
                 !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
-                [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+                [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]>;
     def i32ri :
       NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
                 !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
-                [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+                [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), imm:$b))]>;
     def i64rr :
       NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
                 !strconcat(OpcStr, ".s64 \t$dst, $a, $b;"),
@@ -747,6 +754,13 @@ def SELP_f16x2rr :
               [(set Int32Regs:$dst,
                     (select Int1Regs:$p, (v2f16 Int32Regs:$a), (v2f16 Int32Regs:$b)))]>;
 
+def SELP_i16x2rr :
+    NVPTXInst<(outs Int32Regs:$dst),
+              (ins Int32Regs:$a, Int32Regs:$b, Int1Regs:$p),
+              "selp.b32 \t$dst, $a, $b, $p;",
+              [(set Int32Regs:$dst,
+                    (select Int1Regs:$p, (v2i16 Int32Regs:$a), (v2i16 Int32Regs:$b)))]>;
+
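With SELP_i16x2rr, a whole-vector select stays in one 32-bit register (a
sketch; predicate and register names assumed):

define <2 x i16> @sel_v2i16(i1 %p, <2 x i16> %a, <2 x i16> %b) {
  %r = select i1 %p, <2 x i16> %a, <2 x i16> %b
  ret <2 x i16> %r
}
; sketch: selp.b32 %r3, %r1, %r2, %p1;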
 //-----------------------------------
 // Test Instructions
 //-----------------------------------
@@ -787,6 +801,9 @@ defm SUB_i1 : ADD_SUB_i1<sub>;
 defm ADD : I3<"add.s", add>;
 defm SUB : I3<"sub.s", sub>;
 
+def ADD16x2 : I16x2<"add.s", add>;
+def SUB16x2 : I16x2<"sub.s", sub>;
+
 // in32 and int64 addition and subtraction with carry-out.
 defm ADDCC : ADD_SUB_INT_CARRY<"add.cc", addc>;
 defm SUBCC : ADD_SUB_INT_CARRY<"sub.cc", subc>;
@@ -811,14 +828,14 @@ defm UREM : I3<"rem.u", urem>;
 // Integer absolute value.  NumBits should be one minus the bit width of RC.
 // This idiom implements the algorithm at
 // http://graphics.stanford.edu/~seander/bithacks.html#IntegerAbs.
-multiclass ABS<RegisterClass RC, string SizeName> {
+multiclass ABS<ValueType T, RegisterClass RC, string SizeName> {
   def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
                   !strconcat("abs", SizeName, " \t$dst, $a;"),
-                  [(set RC:$dst, (abs RC:$a))]>;
+                  [(set (T RC:$dst), (abs (T RC:$a)))]>;
 }
-defm ABS_16 : ABS<Int16Regs, ".s16">;
-defm ABS_32 : ABS<Int32Regs, ".s32">;
-defm ABS_64 : ABS<Int64Regs, ".s64">;
+defm ABS_16 : ABS<i16, Int16Regs, ".s16">;
+defm ABS_32 : ABS<i32, Int32Regs, ".s32">;
+defm ABS_64 : ABS<i64, Int64Regs, ".s64">;
 
 // Integer min/max.
 defm SMAX : I3<"max.s", smax>;
@@ -826,6 +843,12 @@ defm UMAX : I3<"max.u", umax>;
 defm SMIN : I3<"min.s", smin>;
 defm UMIN : I3<"min.u", umin>;
 
+def SMAX16x2 : I16x2<"max.s", smax>;
+def UMAX16x2 : I16x2<"max.u", umax>;
+def SMIN16x2 : I16x2<"min.s", smin>;
+def UMIN16x2 : I16x2<"min.u", umin>;
+
+
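A sketch of the packed min/max forms defined here (vector min/max reaches the
backend via the llvm.smax/umax family; register names assumed):

define <2 x i16> @umax_v2i16(<2 x i16> %a, <2 x i16> %b) {
  %r = call <2 x i16> @llvm.umax.v2i16(<2 x i16> %a, <2 x i16> %b)
  ret <2 x i16> %r
}
declare <2 x i16> @llvm.umax.v2i16(<2 x i16>, <2 x i16>)
; sketch: max.u16x2 %r3, %r1, %r2;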
 //
 // Wide multiplication
 //
@@ -890,13 +913,13 @@ def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
 def : Pat<(i64 (mul_wide_signed i32:$a, i32:$b)),
           (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_signed Int32Regs:$a, imm:$b)),
+def : Pat<(i64 (mul_wide_signed (i32 Int32Regs:$a), imm:$b)),
           (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
       Requires<[doMulWide]>;
 def : Pat<(i64 (mul_wide_unsigned i32:$a, i32:$b)),
           (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned Int32Regs:$a, imm:$b)),
+def : Pat<(i64 (mul_wide_unsigned (i32 Int32Regs:$a), imm:$b)),
           (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
       Requires<[doMulWide]>;
 
@@ -1022,22 +1045,22 @@ def MAD32rrr :
   NVPTXInst<(outs Int32Regs:$dst),
             (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
             "mad.lo.s32 \t$dst, $a, $b, $c;",
-            [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, Int32Regs:$c))]>;
+            [(set (i32 Int32Regs:$dst), (imad (i32 Int32Regs:$a), (i32 Int32Regs:$b), (i32 Int32Regs:$c)))]>;
 def MAD32rri :
   NVPTXInst<(outs Int32Regs:$dst),
             (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
             "mad.lo.s32 \t$dst, $a, $b, $c;",
-            [(set Int32Regs:$dst, (imad Int32Regs:$a, Int32Regs:$b, imm:$c))]>;
+            [(set (i32 Int32Regs:$dst), (imad (i32 Int32Regs:$a), (i32 Int32Regs:$b), imm:$c))]>;
 def MAD32rir :
   NVPTXInst<(outs Int32Regs:$dst),
             (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
             "mad.lo.s32 \t$dst, $a, $b, $c;",
-            [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, Int32Regs:$c))]>;
+            [(set (i32 Int32Regs:$dst), (imad (i32 Int32Regs:$a), imm:$b, (i32 Int32Regs:$c)))]>;
 def MAD32rii :
   NVPTXInst<(outs Int32Regs:$dst),
             (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
             "mad.lo.s32 \t$dst, $a, $b, $c;",
-            [(set Int32Regs:$dst, (imad Int32Regs:$a, imm:$b, imm:$c))]>;
+            [(set (i32 Int32Regs:$dst), (imad (i32 Int32Regs:$a), imm:$b, imm:$c))]>;
 
 def MAD64rrr :
   NVPTXInst<(outs Int64Regs:$dst),
@@ -1067,7 +1090,7 @@ def INEG16 :
 def INEG32 :
   NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
             "neg.s32 \t$dst, $src;",
-            [(set Int32Regs:$dst, (ineg Int32Regs:$src))]>;
+            [(set (i32 Int32Regs:$dst), (ineg (i32 Int32Regs:$src)))]>;
 def INEG64 :
   NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
             "neg.s64 \t$dst, $src;",
@@ -1458,11 +1481,11 @@ multiclass BITWISE<string OpcStr, SDNode OpNode> {
   def b32rr :
     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
               !strconcat(OpcStr, ".b32  \t$dst, $a, $b;"),
-              [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+              [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]>;
   def b32ri :
     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
               !strconcat(OpcStr, ".b32  \t$dst, $a, $b;"),
-              [(set Int32Regs:$dst, (OpNode Int32Regs:$a, imm:$b))]>;
+              [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), imm:$b))]>;
   def b64rr :
     NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
               !strconcat(OpcStr, ".b64  \t$dst, $a, $b;"),
@@ -1485,7 +1508,7 @@ def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
                       [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
 def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
                       "not.b32 \t$dst, $src;",
-                      [(set Int32Regs:$dst, (not Int32Regs:$src))]>;
+                      [(set (i32 Int32Regs:$dst), (not (i32 Int32Regs:$src)))]>;
 def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
                        "not.b64 \t$dst, $src;",
                        [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
@@ -1499,7 +1522,7 @@ multiclass SHIFT<string OpcStr, SDNode OpNode> {
    def i64rr :
      NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
-               [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int32Regs:$b))]>;
+               [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 Int32Regs:$b)))]>;
    def i64ri :
      NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
                !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
@@ -1507,11 +1530,11 @@ multiclass SHIFT<string OpcStr, SDNode OpNode> {
    def i32rr :
      NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode Int32Regs:$a, Int32Regs:$b))]>;
+               [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]>;
    def i32ri :
      NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
                !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode Int32Regs:$a, (i32 imm:$b)))]>;
+               [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 imm:$b)))]>;
    def i32ii :
      NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
                !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
@@ -1519,7 +1542,7 @@ multiclass SHIFT<string OpcStr, SDNode OpNode> {
    def i16rr :
      NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int32Regs:$b))]>;
+               [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 Int32Regs:$b)))]>;
    def i16ri :
      NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
                !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
@@ -1534,7 +1557,7 @@ defm SRL : SHIFT<"shr.u", srl>;
 def BREV32 :
   NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
              "brev.b32 \t$dst, $a;",
-             [(set Int32Regs:$dst, (bitreverse Int32Regs:$a))]>;
+             [(set Int32Regs:$dst, (bitreverse (i32 Int32Regs:$a)))]>;
 def BREV64 :
   NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
              "brev.b64 \t$dst, $a;",
@@ -1550,13 +1573,13 @@ def BREV64 :
 def ROTL32imm_hw :
   NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
             "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
-            [(set Int32Regs:$dst, (rotl Int32Regs:$src, (i32 imm:$amt)))]>,
+            [(set Int32Regs:$dst, (rotl (i32 Int32Regs:$src), (i32 imm:$amt)))]>,
            Requires<[hasHWROT32]>;
 
 def ROTL32reg_hw :
   NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
             "shf.l.wrap.b32 \t$dst, $src, $src, $amt;",
-            [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+            [(set Int32Regs:$dst, (rotl (i32 Int32Regs:$src), (i32 Int32Regs:$amt)))]>,
            Requires<[hasHWROT32]>;
 
 // 32 bit r2 = rotr r1, n
@@ -1565,13 +1588,13 @@ def ROTL32reg_hw :
 def ROTR32imm_hw :
   NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, i32imm:$amt),
             "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
-            [(set Int32Regs:$dst, (rotr Int32Regs:$src, (i32 imm:$amt)))]>,
+            [(set Int32Regs:$dst, (rotr (i32 Int32Regs:$src), (i32 imm:$amt)))]>,
            Requires<[hasHWROT32]>;
 
 def ROTR32reg_hw :
   NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$amt),
             "shf.r.wrap.b32 \t$dst, $src, $src, $amt;",
-            [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+            [(set Int32Regs:$dst, (rotr (i32 Int32Regs:$src), (i32 Int32Regs:$amt)))]>,
            Requires<[hasHWROT32]>;
 
 // 32-bit software rotate by immediate.  $amt2 should equal 32 - $amt1.
@@ -1591,10 +1614,10 @@ def SUB_FRM_32 : SDNodeXForm<imm, [{
   return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N), MVT::i32);
 }]>;
 
-def : Pat<(rotl Int32Regs:$src, (i32 imm:$amt)),
+def : Pat<(rotl (i32 Int32Regs:$src), (i32 imm:$amt)),
           (ROT32imm_sw Int32Regs:$src, imm:$amt, (SUB_FRM_32 node:$amt))>,
       Requires<[noHWROT32]>;
-def : Pat<(rotr Int32Regs:$src, (i32 imm:$amt)),
+def : Pat<(rotr (i32 Int32Regs:$src), (i32 imm:$amt)),
           (ROT32imm_sw Int32Regs:$src, (SUB_FRM_32 node:$amt), imm:$amt)>,
       Requires<[noHWROT32]>;
 
@@ -1610,7 +1633,7 @@ def ROTL32reg_sw :
             "shr.b32 \t%rhs, $src, %amt2;\n\t"
             "add.u32 \t$dst, %lhs, %rhs;\n\t"
             "}}",
-            [(set Int32Regs:$dst, (rotl Int32Regs:$src, Int32Regs:$amt))]>,
+            [(set Int32Regs:$dst, (rotl (i32 Int32Regs:$src), (i32 Int32Regs:$amt)))]>,
            Requires<[noHWROT32]>;
 
 // 32-bit software rotate right by register.
@@ -1625,7 +1648,7 @@ def ROTR32reg_sw :
             "shl.b32 \t%rhs, $src, %amt2;\n\t"
             "add.u32 \t$dst, %lhs, %rhs;\n\t"
             "}}",
-            [(set Int32Regs:$dst, (rotr Int32Regs:$src, Int32Regs:$amt))]>,
+            [(set Int32Regs:$dst, (rotr (i32 Int32Regs:$src), (i32 Int32Regs:$amt)))]>,
            Requires<[noHWROT32]>;
 
 // 64-bit software rotate by immediate.  $amt2 should equal 64 - $amt1.
@@ -1662,7 +1685,7 @@ def ROTL64reg_sw :
             "shr.b64 \t%rhs, $src, %amt2;\n\t"
             "add.u64 \t$dst, %lhs, %rhs;\n\t"
             "}}",
-            [(set Int64Regs:$dst, (rotl Int64Regs:$src, Int32Regs:$amt))]>;
+            [(set Int64Regs:$dst, (rotl Int64Regs:$src, (i32 Int32Regs:$amt)))]>;
 
 def ROTR64reg_sw :
   NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src, Int32Regs:$amt),
@@ -1675,7 +1698,7 @@ def ROTR64reg_sw :
             "shl.b64 \t%rhs, $src, %amt2;\n\t"
             "add.u64 \t$dst, %lhs, %rhs;\n\t"
             "}}",
-            [(set Int64Regs:$dst, (rotr Int64Regs:$src, Int32Regs:$amt))]>;
+            [(set Int64Regs:$dst, (rotr Int64Regs:$src, (i32 Int32Regs:$amt)))]>;
 
 //
 // Funnnel shift in clamp mode
@@ -1691,14 +1714,14 @@ def FUNSHFLCLAMP :
             (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
             "shf.l.clamp.b32 \t$dst, $lo, $hi, $amt;",
             [(set Int32Regs:$dst,
-              (FUN_SHFL_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+              (FUN_SHFL_CLAMP (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 Int32Regs:$amt)))]>;
 
 def FUNSHFRCLAMP :
   NVPTXInst<(outs Int32Regs:$dst),
             (ins Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
             "shf.r.clamp.b32 \t$dst, $lo, $hi, $amt;",
             [(set Int32Regs:$dst,
-             (FUN_SHFR_CLAMP Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt))]>;
+             (FUN_SHFR_CLAMP (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 Int32Regs:$amt)))]>;
 
 //
 // BFE - bit-field extract
@@ -1915,7 +1938,7 @@ def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
                          [(set Int16Regs:$dst, imm:$src)]>;
 def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
                          "mov.u32 \t$dst, $src;",
-                         [(set Int32Regs:$dst, imm:$src)]>;
+                         [(set (i32 Int32Regs:$dst), imm:$src)]>;
 def IMOV64ri : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
                         "mov.u64 \t$dst, $src;",
                         [(set Int64Regs:$dst, imm:$src)]>;
@@ -1978,9 +2001,9 @@ multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
   // i32 -> pred
   def : Pat<(i1 (OpNode i32:$a, i32:$b)),
             (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
-  def : Pat<(i1 (OpNode Int32Regs:$a, imm:$b)),
+  def : Pat<(i1 (OpNode (i32 Int32Regs:$a), imm:$b)),
             (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
-  def : Pat<(i1 (OpNode imm:$a, Int32Regs:$b)),
+  def : Pat<(i1 (OpNode imm:$a, (i32 Int32Regs:$b))),
             (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
   // i64 -> pred
   def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
@@ -2000,9 +2023,9 @@ multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
   // i32 -> i32
   def : Pat<(i32 (OpNode i32:$a, i32:$b)),
             (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
-  def : Pat<(i32 (OpNode Int32Regs:$a, imm:$b)),
+  def : Pat<(i32 (OpNode (i32 Int32Regs:$a), imm:$b)),
             (set_32ri Int32Regs:$a, imm:$b, Mode)>;
-  def : Pat<(i32 (OpNode imm:$a, Int32Regs:$b)),
+  def : Pat<(i32 (OpNode imm:$a, (i32 Int32Regs:$b))),
             (set_32ir imm:$a, Int32Regs:$b, Mode)>;
   // i64 -> i32
   def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
@@ -2633,7 +2656,7 @@ foreach vt = [f16, bf16] in {
   def: Pat<(vt (ProxyReg  vt:$src)), (ProxyRegI16 Int16Regs:$src)>;
 }
 
-foreach vt = [v2f16, v2bf16] in {
+foreach vt = [v2f16, v2bf16, v2i16] in {
   def: Pat<(vt (ProxyReg  vt:$src)), (ProxyRegI32 Int32Regs:$src)>;
 }
 
@@ -2948,6 +2971,11 @@ def: Pat<(i16 (bitconvert (vt Int16Regs:$a))),
          (ProxyRegI16 Int16Regs:$a)>;
 }
 
+def: Pat<(v2i16 (bitconvert (i32 Int32Regs:$a))),
+         (ProxyRegI32 Int32Regs:$a)>;
+def: Pat<(i32 (bitconvert (v2i16 Int32Regs:$a))),
+         (ProxyRegI32 Int32Regs:$a)>;
+
 // NOTE: pred->fp are currently sub-optimal due to an issue in TableGen where
 // we cannot specify floating-point literals in isel patterns.  Therefore, we
 // use an integer selp to select either 1 or 0 and then cvt to floating-point.
@@ -3207,25 +3235,25 @@ def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
 
 
 // Select instructions with 32-bit predicates
-def : Pat<(select Int32Regs:$pred, i16:$a, i16:$b),
+def : Pat<(select (i32 Int32Regs:$pred), i16:$a, i16:$b),
           (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, i32:$a, i32:$b),
+def : Pat<(select (i32 Int32Regs:$pred), i32:$a, i32:$b),
           (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Int64Regs:$a, Int64Regs:$b),
+def : Pat<(select (i32 Int32Regs:$pred), Int64Regs:$a, Int64Regs:$b),
           (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, (f16 Int16Regs:$a), (f16 Int16Regs:$b)),
+def : Pat<(select (i32 Int32Regs:$pred), (f16 Int16Regs:$a), (f16 Int16Regs:$b)),
           (SELP_f16rr Int16Regs:$a, Int16Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, (bf16 Int16Regs:$a), (bf16 Int16Regs:$b)),
+def : Pat<(select (i32 Int32Regs:$pred), (bf16 Int16Regs:$a), (bf16 Int16Regs:$b)),
           (SELP_bf16rr Int16Regs:$a, Int16Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float32Regs:$a, Float32Regs:$b),
+def : Pat<(select (i32 Int32Regs:$pred), Float32Regs:$a, Float32Regs:$b),
           (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select Int32Regs:$pred, Float64Regs:$a, Float64Regs:$b),
+def : Pat<(select (i32 Int32Regs:$pred), Float64Regs:$a, Float64Regs:$b),
           (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
 
@@ -3300,6 +3328,13 @@ def : Pat<(bf16 (extractelt (v2bf16 Int32Regs:$src), 1)),
 def : Pat<(v2bf16 (build_vector (bf16 Int16Regs:$a), (bf16 Int16Regs:$b))),
           (V2I16toI32 Int16Regs:$a, Int16Regs:$b)>;
 
+def : Pat<(i16 (extractelt (v2i16 Int32Regs:$src), 0)),
+          (I32toI16L Int32Regs:$src)>;
+def : Pat<(i16 (extractelt (v2i16 Int32Regs:$src), 1)),
+          (I32toI16H Int32Regs:$src)>;
+def : Pat<(v2i16 (build_vector (i16 Int16Regs:$a), (i16 Int16Regs:$b))),
+          (V2I16toI32 Int16Regs:$a, Int16Regs:$b)>;
+
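These patterns unpack/repack the halves with the existing 16-bit moves; for
example (a sketch; the exact mov form is assumed):

define i16 @lo_half(<2 x i16> %v) {
  %e = extractelement <2 x i16> %v, i32 0
  ret i16 %e
}
; sketch: mov.b32 {%rs1, %rs2}, %r1;  (low half lands in %rs1)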
 // Count leading zeros
 let hasSideEffects = false in {
   def CLZr32 : NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
@@ -3309,7 +3344,7 @@ let hasSideEffects = false in {
 }
 
 // 32-bit has a direct PTX instruction
-def : Pat<(ctlz Int32Regs:$a), (CLZr32 Int32Regs:$a)>;
+def : Pat<(i32 (ctlz (i32 Int32Regs:$a))), (CLZr32 Int32Regs:$a)>;
 
 // The return type of the ctlz ISD node is the same as its input, but the PTX
 // ctz instruction always returns a 32-bit value.  For ctlz.i64, convert the
@@ -3347,7 +3382,7 @@ let hasSideEffects = false in {
 }
 
 // 32-bit has a direct PTX instruction
-def : Pat<(ctpop Int32Regs:$a), (POPCr32 Int32Regs:$a)>;
+def : Pat<(i32 (ctpop (i32 Int32Regs:$a))), (POPCr32 Int32Regs:$a)>;
 
 // For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit
 // to match the LLVM semantics.  Just as with ctlz.i64, we provide a second
@@ -3460,7 +3495,7 @@ let isTerminator=1 in {
                            "bra.uni \t$target;", [(br bb:$target)]>;
 }
 
-def : Pat<(brcond Int32Regs:$a, bb:$target),
+def : Pat<(brcond (i32 Int32Regs:$a), bb:$target),
           (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
 
 // SelectionDAGBuilder::visitSWitchCase() will invert the condition of a
diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index f0de0144d410e9c..85eae44f349aa37 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -1460,29 +1460,31 @@ class ATOMIC_SHARED_CHK <dag ops, dag frag>
 class ATOMIC_GENERIC_CHK <dag ops, dag frag>
  : PatFrag<ops, frag, AS_match.generic>;
 
-multiclass F_ATOMIC_2_imp<NVPTXRegClass ptrclass, NVPTXRegClass regclass,
+multiclass F_ATOMIC_2_imp<ValueType ptrT, NVPTXRegClass ptrclass,
+  ValueType regT, NVPTXRegClass regclass,
   string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp,
   Operand IMMType, SDNode IMM, list<Predicate> Pred> {
   def reg : NVPTXInst<(outs regclass:$dst), (ins ptrclass:$addr, regclass:$b),
     !strconcat("atom", SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b;"),
-    [(set regclass:$dst, (IntOp ptrclass:$addr, regclass:$b))]>,
+    [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b)))]>,
   Requires<Pred>;
   def imm : NVPTXInst<(outs regclass:$dst), (ins ptrclass:$addr, IMMType:$b),
     !strconcat("atom", SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b;", ""),
-    [(set regclass:$dst, (IntOp ptrclass:$addr, IMM:$b))]>,
+    [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), IMM:$b))]>,
   Requires<Pred>;
 }
-multiclass F_ATOMIC_2<NVPTXRegClass regclass, string SpaceStr, string TypeStr,
+multiclass F_ATOMIC_2<ValueType regT, NVPTXRegClass regclass, string SpaceStr, string TypeStr,
   string OpcStr, PatFrag IntOp, Operand IMMType, SDNode IMM,
   list<Predicate> Pred = []> {
-  defm p32 : F_ATOMIC_2_imp<Int32Regs, regclass, SpaceStr, TypeStr, OpcStr,
+  defm p32 : F_ATOMIC_2_imp<i32, Int32Regs, regT, regclass, SpaceStr, TypeStr, OpcStr,
     IntOp, IMMType, IMM, Pred>;
-  defm p64 : F_ATOMIC_2_imp<Int64Regs, regclass, SpaceStr, TypeStr, OpcStr,
+  defm p64 : F_ATOMIC_2_imp<i64, Int64Regs, regT, regclass, SpaceStr, TypeStr, OpcStr,
     IntOp, IMMType, IMM, Pred>;
 }
 
 // has 2 operands, neg the second one
-multiclass F_ATOMIC_2_NEG_imp<NVPTXRegClass ptrclass, NVPTXRegClass regclass,
+multiclass F_ATOMIC_2_NEG_imp<ValueType ptrT, NVPTXRegClass ptrclass,
+  ValueType regT, NVPTXRegClass regclass,
   string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp,
   list<Predicate> Pred> {
   def reg : NVPTXInst<(outs regclass:$dst), (ins ptrclass:$addr, regclass:$b),
@@ -1492,50 +1494,51 @@ multiclass F_ATOMIC_2_NEG_imp<NVPTXRegClass ptrclass, NVPTXRegClass regclass,
       "neg.s", TypeStr, " \ttemp, $b; \n\t",
       "atom", SpaceStr, OpcStr, ".u", TypeStr, " \t$dst, [$addr], temp; \n\t",
       "}}"),
-    [(set regclass:$dst, (IntOp ptrclass:$addr, regclass:$b))]>,
+    [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b)))]>,
   Requires<Pred>;
 }
-multiclass F_ATOMIC_2_NEG<NVPTXRegClass regclass, string SpaceStr,
+multiclass F_ATOMIC_2_NEG<ValueType regT, NVPTXRegClass regclass, string SpaceStr,
   string TypeStr, string OpcStr, PatFrag IntOp, list<Predicate> Pred = []> {
- defm p32: F_ATOMIC_2_NEG_imp<Int32Regs, regclass, SpaceStr, TypeStr, OpcStr,
+ defm p32: F_ATOMIC_2_NEG_imp<i32, Int32Regs, regT, regclass, SpaceStr, TypeStr, OpcStr,
    IntOp, Pred> ;
- defm p64: F_ATOMIC_2_NEG_imp<Int64Regs, regclass, SpaceStr, TypeStr, OpcStr,
+ defm p64: F_ATOMIC_2_NEG_imp<i64, Int64Regs, regT, regclass, SpaceStr, TypeStr, OpcStr,
    IntOp, Pred> ;
 }
 
 // has 3 operands
-multiclass F_ATOMIC_3_imp<NVPTXRegClass ptrclass, NVPTXRegClass regclass,
+multiclass F_ATOMIC_3_imp<ValueType ptrT, NVPTXRegClass ptrclass,
+  ValueType regT, NVPTXRegClass regclass,
   string SpaceStr, string TypeStr, string OpcStr, PatFrag IntOp,
   Operand IMMType, list<Predicate> Pred> {
   def reg : NVPTXInst<(outs regclass:$dst),
     (ins ptrclass:$addr, regclass:$b, regclass:$c),
     !strconcat("atom", SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
-    [(set regclass:$dst, (IntOp ptrclass:$addr, regclass:$b, regclass:$c))]>,
+    [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b), (regT regclass:$c)))]>,
   Requires<Pred>;
 
   def imm1 : NVPTXInst<(outs regclass:$dst),
     (ins ptrclass:$addr, IMMType:$b, regclass:$c),
     !strconcat("atom", SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
-    [(set regclass:$dst, (IntOp ptrclass:$addr, imm:$b, regclass:$c))]>,
+    [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), imm:$b, (regT regclass:$c)))]>,
   Requires<Pred>;
 
   def imm2 : NVPTXInst<(outs regclass:$dst),
     (ins ptrclass:$addr, regclass:$b, IMMType:$c),
     !strconcat("atom", SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;", ""),
-    [(set regclass:$dst, (IntOp ptrclass:$addr, regclass:$b, imm:$c))]>,
+    [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), (regT regclass:$b), imm:$c))]>,
   Requires<Pred>;
 
   def imm3 : NVPTXInst<(outs regclass:$dst),
     (ins ptrclass:$addr, IMMType:$b, IMMType:$c),
     !strconcat("atom", SpaceStr, OpcStr, TypeStr, " \t$dst, [$addr], $b, $c;"),
-    [(set regclass:$dst, (IntOp ptrclass:$addr, imm:$b, imm:$c))]>,
+    [(set (regT regclass:$dst), (IntOp (ptrT ptrclass:$addr), imm:$b, imm:$c))]>,
   Requires<Pred>;
 }
-multiclass F_ATOMIC_3<NVPTXRegClass regclass, string SpaceStr, string TypeStr,
+multiclass F_ATOMIC_3<ValueType regT, NVPTXRegClass regclass, string SpaceStr, string TypeStr,
   string OpcStr, PatFrag IntOp, Operand IMMType, list<Predicate> Pred = []> {
-  defm p32 : F_ATOMIC_3_imp<Int32Regs, regclass, SpaceStr, TypeStr, OpcStr,
+  defm p32 : F_ATOMIC_3_imp<i32, Int32Regs, regT, regclass, SpaceStr, TypeStr, OpcStr,
     IntOp, IMMType, Pred>;
-  defm p64 : F_ATOMIC_3_imp<Int64Regs, regclass, SpaceStr, TypeStr, OpcStr,
+  defm p64 : F_ATOMIC_3_imp<i64, Int64Regs, regT, regclass, SpaceStr, TypeStr, OpcStr,
     IntOp, IMMType, Pred>;
 }
 
@@ -1560,36 +1563,36 @@ def atomic_load_add_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
 def atomic_load_add_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (atomic_load_fadd node:$a, node:$b)>;
 
-defm INT_PTX_ATOM_ADD_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32", ".add",
+defm INT_PTX_ATOM_ADD_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".u32", ".add",
   atomic_load_add_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_ADD_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32", ".add",
+defm INT_PTX_ATOM_ADD_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".u32", ".add",
   atomic_load_add_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_ADD_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".add",
+defm INT_PTX_ATOM_ADD_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".u32", ".add",
   atomic_load_add_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_ADD_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+defm INT_PTX_ATOM_ADD_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global", ".u32",
   ".add", atomic_load_add_32_gen, i32imm, imm>;
 
-defm INT_PTX_ATOM_ADD_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".u64", ".add",
+defm INT_PTX_ATOM_ADD_G_64 : F_ATOMIC_2<i64, Int64Regs, ".global", ".u64", ".add",
   atomic_load_add_64_g, i64imm, imm>;
-defm INT_PTX_ATOM_ADD_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".u64", ".add",
+defm INT_PTX_ATOM_ADD_S_64 : F_ATOMIC_2<i64, Int64Regs, ".shared", ".u64", ".add",
   atomic_load_add_64_s, i64imm, imm>;
-defm INT_PTX_ATOM_ADD_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".u64", ".add",
+defm INT_PTX_ATOM_ADD_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".u64", ".add",
   atomic_load_add_64_gen, i64imm, imm>;
-defm INT_PTX_ATOM_ADD_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".u64",
+defm INT_PTX_ATOM_ADD_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global", ".u64",
   ".add", atomic_load_add_64_gen, i64imm, imm>;
 
-defm INT_PTX_ATOM_ADD_G_F32 : F_ATOMIC_2<Float32Regs, ".global", ".f32", ".add",
+defm INT_PTX_ATOM_ADD_G_F32 : F_ATOMIC_2<f32, Float32Regs, ".global", ".f32", ".add",
   atomic_load_add_g, f32imm, fpimm>;
-defm INT_PTX_ATOM_ADD_S_F32 : F_ATOMIC_2<Float32Regs, ".shared", ".f32", ".add",
+defm INT_PTX_ATOM_ADD_S_F32 : F_ATOMIC_2<f32, Float32Regs, ".shared", ".f32", ".add",
   atomic_load_add_s, f32imm, fpimm>;
-defm INT_PTX_ATOM_ADD_GEN_F32 : F_ATOMIC_2<Float32Regs, "", ".f32", ".add",
+defm INT_PTX_ATOM_ADD_GEN_F32 : F_ATOMIC_2<f32, Float32Regs, "", ".f32", ".add",
   atomic_load_add_gen, f32imm, fpimm>;
 
-defm INT_PTX_ATOM_ADD_G_F64 : F_ATOMIC_2<Float64Regs, ".global", ".f64", ".add",
+defm INT_PTX_ATOM_ADD_G_F64 : F_ATOMIC_2<f64, Float64Regs, ".global", ".f64", ".add",
   atomic_load_add_g, f64imm, fpimm, [hasAtomAddF64]>;
-defm INT_PTX_ATOM_ADD_S_F64 : F_ATOMIC_2<Float64Regs, ".shared", ".f64", ".add",
+defm INT_PTX_ATOM_ADD_S_F64 : F_ATOMIC_2<f64, Float64Regs, ".shared", ".f64", ".add",
   atomic_load_add_s, f64imm, fpimm, [hasAtomAddF64]>;
-defm INT_PTX_ATOM_ADD_GEN_F64 : F_ATOMIC_2<Float64Regs, "", ".f64", ".add",
+defm INT_PTX_ATOM_ADD_GEN_F64 : F_ATOMIC_2<f64, Float64Regs, "", ".f64", ".add",
   atomic_load_add_gen, f64imm, fpimm, [hasAtomAddF64]>;
 
 // atom_sub
@@ -1607,21 +1610,21 @@ def atomic_load_sub_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
 def atomic_load_sub_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (atomic_load_sub_64 node:$a, node:$b)>;
 
-defm INT_PTX_ATOM_SUB_G_32 : F_ATOMIC_2_NEG<Int32Regs, ".global", "32", ".add",
+defm INT_PTX_ATOM_SUB_G_32 : F_ATOMIC_2_NEG<i32, Int32Regs, ".global", "32", ".add",
   atomic_load_sub_32_g>;
-defm INT_PTX_ATOM_SUB_G_64 : F_ATOMIC_2_NEG<Int64Regs, ".global", "64", ".add",
+defm INT_PTX_ATOM_SUB_G_64 : F_ATOMIC_2_NEG<i64, Int64Regs, ".global", "64", ".add",
   atomic_load_sub_64_g>;
-defm INT_PTX_ATOM_SUB_GEN_32 : F_ATOMIC_2_NEG<Int32Regs, "", "32", ".add",
+defm INT_PTX_ATOM_SUB_GEN_32 : F_ATOMIC_2_NEG<i32, Int32Regs, "", "32", ".add",
   atomic_load_sub_32_gen>;
-defm INT_PTX_ATOM_SUB_GEN_32_USE_G : F_ATOMIC_2_NEG<Int32Regs, ".global", "32",
+defm INT_PTX_ATOM_SUB_GEN_32_USE_G : F_ATOMIC_2_NEG<i32, Int32Regs, ".global", "32",
   ".add", atomic_load_sub_32_gen>;
-defm INT_PTX_ATOM_SUB_S_32 : F_ATOMIC_2_NEG<Int32Regs, ".shared", "32", ".add",
+defm INT_PTX_ATOM_SUB_S_32 : F_ATOMIC_2_NEG<i32, Int32Regs, ".shared", "32", ".add",
   atomic_load_sub_32_s>;
-defm INT_PTX_ATOM_SUB_S_64 : F_ATOMIC_2_NEG<Int64Regs, ".shared", "64", ".add",
+defm INT_PTX_ATOM_SUB_S_64 : F_ATOMIC_2_NEG<i64, Int64Regs, ".shared", "64", ".add",
   atomic_load_sub_64_s>;
-defm INT_PTX_ATOM_SUB_GEN_64 : F_ATOMIC_2_NEG<Int64Regs, "", "64", ".add",
+defm INT_PTX_ATOM_SUB_GEN_64 : F_ATOMIC_2_NEG<i64, Int64Regs, "", "64", ".add",
   atomic_load_sub_64_gen>;
-defm INT_PTX_ATOM_SUB_GEN_64_USE_G : F_ATOMIC_2_NEG<Int64Regs, ".global", "64",
+defm INT_PTX_ATOM_SUB_GEN_64_USE_G : F_ATOMIC_2_NEG<i64, Int64Regs, ".global", "64",
   ".add", atomic_load_sub_64_gen>;
 
 // atom_swap
@@ -1639,21 +1642,21 @@ def atomic_swap_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
 def atomic_swap_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (atomic_swap_64 node:$a, node:$b)>;
 
-defm INT_PTX_ATOM_SWAP_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".exch",
+defm INT_PTX_ATOM_SWAP_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".b32", ".exch",
   atomic_swap_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_SWAP_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".b32", ".exch",
+defm INT_PTX_ATOM_SWAP_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".b32", ".exch",
   atomic_swap_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_SWAP_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".exch",
+defm INT_PTX_ATOM_SWAP_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".b32", ".exch",
   atomic_swap_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_SWAP_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
+defm INT_PTX_ATOM_SWAP_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global", ".b32",
   ".exch", atomic_swap_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_SWAP_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".b64", ".exch",
+defm INT_PTX_ATOM_SWAP_G_64 : F_ATOMIC_2<i64, Int64Regs, ".global", ".b64", ".exch",
   atomic_swap_64_g, i64imm, imm>;
-defm INT_PTX_ATOM_SWAP_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".b64", ".exch",
+defm INT_PTX_ATOM_SWAP_S_64 : F_ATOMIC_2<i64, Int64Regs, ".shared", ".b64", ".exch",
   atomic_swap_64_s, i64imm, imm>;
-defm INT_PTX_ATOM_SWAP_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".b64", ".exch",
+defm INT_PTX_ATOM_SWAP_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".b64", ".exch",
   atomic_swap_64_gen, i64imm, imm>;
-defm INT_PTX_ATOM_SWAP_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".b64",
+defm INT_PTX_ATOM_SWAP_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global", ".b64",
   ".exch", atomic_swap_64_gen, i64imm, imm>;
 
 // atom_max
@@ -1683,37 +1686,37 @@ def atomic_load_umax_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
 def atomic_load_umax_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (atomic_load_umax_64 node:$a, node:$b)>;
 
-defm INT_PTX_ATOM_LOAD_MAX_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".s32",
+defm INT_PTX_ATOM_LOAD_MAX_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".s32",
   ".max", atomic_load_max_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_MAX_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".s32",
+defm INT_PTX_ATOM_LOAD_MAX_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".s32",
   ".max", atomic_load_max_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_MAX_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".s32", ".max",
+defm INT_PTX_ATOM_LOAD_MAX_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".s32", ".max",
   atomic_load_max_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_MAX_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
+defm INT_PTX_ATOM_LOAD_MAX_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global",
   ".s32", ".max", atomic_load_max_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_MAX_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".s64",
+defm INT_PTX_ATOM_LOAD_MAX_G_64 : F_ATOMIC_2<i64, Int64Regs, ".global", ".s64",
   ".max", atomic_load_max_64_g, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_MAX_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".s64",
+defm INT_PTX_ATOM_LOAD_MAX_S_64 : F_ATOMIC_2<i64, Int64Regs, ".shared", ".s64",
   ".max", atomic_load_max_64_s, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_MAX_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".s64", ".max",
+defm INT_PTX_ATOM_LOAD_MAX_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".s64", ".max",
   atomic_load_max_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_MAX_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global",
+defm INT_PTX_ATOM_LOAD_MAX_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global",
   ".s64", ".max", atomic_load_max_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_UMAX_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+defm INT_PTX_ATOM_LOAD_UMAX_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".u32",
   ".max", atomic_load_umax_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_UMAX_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32",
+defm INT_PTX_ATOM_LOAD_UMAX_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".u32",
   ".max", atomic_load_umax_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_UMAX_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".max",
+defm INT_PTX_ATOM_LOAD_UMAX_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".u32", ".max",
   atomic_load_umax_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_UMAX_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
+defm INT_PTX_ATOM_LOAD_UMAX_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global",
   ".u32", ".max", atomic_load_umax_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_UMAX_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".u64",
+defm INT_PTX_ATOM_LOAD_UMAX_G_64 : F_ATOMIC_2<i64, Int64Regs, ".global", ".u64",
   ".max", atomic_load_umax_64_g, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_UMAX_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".u64",
+defm INT_PTX_ATOM_LOAD_UMAX_S_64 : F_ATOMIC_2<i64, Int64Regs, ".shared", ".u64",
   ".max", atomic_load_umax_64_s, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_UMAX_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".u64", ".max",
+defm INT_PTX_ATOM_LOAD_UMAX_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".u64", ".max",
   atomic_load_umax_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_UMAX_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global",
+defm INT_PTX_ATOM_LOAD_UMAX_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global",
   ".u64", ".max", atomic_load_umax_64_gen, i64imm, imm, [hasSM<32>]>;
 
 // atom_min
@@ -1743,37 +1746,37 @@ def atomic_load_umin_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
 def atomic_load_umin_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (atomic_load_umin_64 node:$a, node:$b)>;
 
-defm INT_PTX_ATOM_LOAD_MIN_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".s32",
+defm INT_PTX_ATOM_LOAD_MIN_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".s32",
   ".min", atomic_load_min_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_MIN_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".s32",
+defm INT_PTX_ATOM_LOAD_MIN_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".s32",
   ".min", atomic_load_min_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_MIN_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".s32", ".min",
+defm INT_PTX_ATOM_LOAD_MIN_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".s32", ".min",
   atomic_load_min_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_MIN_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
+defm INT_PTX_ATOM_LOAD_MIN_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global",
   ".s32", ".min", atomic_load_min_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_MIN_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".s64",
+defm INT_PTX_ATOM_LOAD_MIN_G_64 : F_ATOMIC_2<i64, Int64Regs, ".global", ".s64",
   ".min", atomic_load_min_64_g, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_MIN_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".s64",
+defm INT_PTX_ATOM_LOAD_MIN_S_64 : F_ATOMIC_2<i64, Int64Regs, ".shared", ".s64",
   ".min", atomic_load_min_64_s, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_MIN_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".s64", ".min",
+defm INT_PTX_ATOM_LOAD_MIN_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".s64", ".min",
   atomic_load_min_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_MIN_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global",
+defm INT_PTX_ATOM_LOAD_MIN_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global",
   ".s64", ".min", atomic_load_min_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_UMIN_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+defm INT_PTX_ATOM_LOAD_UMIN_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".u32",
   ".min", atomic_load_umin_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_UMIN_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32",
+defm INT_PTX_ATOM_LOAD_UMIN_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".u32",
   ".min", atomic_load_umin_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_UMIN_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".min",
+defm INT_PTX_ATOM_LOAD_UMIN_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".u32", ".min",
   atomic_load_umin_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_UMIN_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global",
+defm INT_PTX_ATOM_LOAD_UMIN_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global",
   ".u32", ".min", atomic_load_umin_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_LOAD_UMIN_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".u64",
+defm INT_PTX_ATOM_LOAD_UMIN_G_64 : F_ATOMIC_2<i64, Int64Regs, ".global", ".u64",
   ".min", atomic_load_umin_64_g, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_UMIN_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".u64",
+defm INT_PTX_ATOM_LOAD_UMIN_S_64 : F_ATOMIC_2<i64, Int64Regs, ".shared", ".u64",
   ".min", atomic_load_umin_64_s, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_UMIN_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".u64", ".min",
+defm INT_PTX_ATOM_LOAD_UMIN_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".u64", ".min",
   atomic_load_umin_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_LOAD_UMIN_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global",
+defm INT_PTX_ATOM_LOAD_UMIN_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global",
   ".u64", ".min", atomic_load_umin_64_gen, i64imm, imm, [hasSM<32>]>;
 
 // atom_inc  atom_dec
@@ -1791,21 +1794,21 @@ def atomic_load_dec_32_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
 def atomic_load_dec_32_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (int_nvvm_atomic_load_dec_32 node:$a, node:$b)>;
 
-defm INT_PTX_ATOM_INC_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32", ".inc",
+defm INT_PTX_ATOM_INC_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".u32", ".inc",
   atomic_load_inc_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_INC_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32", ".inc",
+defm INT_PTX_ATOM_INC_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".u32", ".inc",
   atomic_load_inc_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_INC_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".inc",
+defm INT_PTX_ATOM_INC_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".u32", ".inc",
   atomic_load_inc_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_INC_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+defm INT_PTX_ATOM_INC_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global", ".u32",
   ".inc", atomic_load_inc_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_DEC_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".u32", ".dec",
+defm INT_PTX_ATOM_DEC_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".u32", ".dec",
   atomic_load_dec_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_DEC_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".u32", ".dec",
+defm INT_PTX_ATOM_DEC_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".u32", ".dec",
   atomic_load_dec_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_DEC_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".u32", ".dec",
+defm INT_PTX_ATOM_DEC_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".u32", ".dec",
   atomic_load_dec_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_DEC_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".u32",
+defm INT_PTX_ATOM_DEC_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global", ".u32",
   ".dec", atomic_load_dec_32_gen, i32imm, imm>;
 
 // atom_and
@@ -1823,21 +1826,21 @@ def atomic_load_and_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
 def atomic_load_and_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (atomic_load_and_64 node:$a, node:$b)>;
 
-defm INT_PTX_ATOM_AND_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".and",
+defm INT_PTX_ATOM_AND_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".b32", ".and",
   atomic_load_and_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_AND_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".b32", ".and",
+defm INT_PTX_ATOM_AND_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".b32", ".and",
   atomic_load_and_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_AND_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".and",
+defm INT_PTX_ATOM_AND_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".b32", ".and",
   atomic_load_and_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_AND_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
+defm INT_PTX_ATOM_AND_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global", ".b32",
   ".and", atomic_load_and_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_AND_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".b64", ".and",
+defm INT_PTX_ATOM_AND_G_64 : F_ATOMIC_2<i64, Int64Regs, ".global", ".b64", ".and",
   atomic_load_and_64_g, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_AND_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".b64", ".and",
+defm INT_PTX_ATOM_AND_S_64 : F_ATOMIC_2<i64, Int64Regs, ".shared", ".b64", ".and",
   atomic_load_and_64_s, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_AND_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".b64", ".and",
+defm INT_PTX_ATOM_AND_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".b64", ".and",
   atomic_load_and_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_AND_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".b64",
+defm INT_PTX_ATOM_AND_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global", ".b64",
   ".and", atomic_load_and_64_gen, i64imm, imm, [hasSM<32>]>;
 
 // atom_or
@@ -1855,21 +1858,21 @@ def atomic_load_or_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
 def atomic_load_or_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (atomic_load_or_64 node:$a, node:$b)>;
 
-defm INT_PTX_ATOM_OR_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".or",
+defm INT_PTX_ATOM_OR_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".b32", ".or",
   atomic_load_or_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_OR_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".or",
+defm INT_PTX_ATOM_OR_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".b32", ".or",
   atomic_load_or_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_OR_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
+defm INT_PTX_ATOM_OR_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global", ".b32",
   ".or", atomic_load_or_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_OR_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".b32", ".or",
+defm INT_PTX_ATOM_OR_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".b32", ".or",
   atomic_load_or_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_OR_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".b64", ".or",
+defm INT_PTX_ATOM_OR_G_64 : F_ATOMIC_2<i64, Int64Regs, ".global", ".b64", ".or",
   atomic_load_or_64_g, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_OR_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".b64", ".or",
+defm INT_PTX_ATOM_OR_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".b64", ".or",
   atomic_load_or_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_OR_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".b64",
+defm INT_PTX_ATOM_OR_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global", ".b64",
   ".or", atomic_load_or_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_OR_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".b64", ".or",
+defm INT_PTX_ATOM_OR_S_64 : F_ATOMIC_2<i64, Int64Regs, ".shared", ".b64", ".or",
   atomic_load_or_64_s, i64imm, imm, [hasSM<32>]>;
 
 // atom_xor
@@ -1887,21 +1890,21 @@ def atomic_load_xor_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b),
 def atomic_load_xor_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b),
   (atomic_load_xor_64 node:$a, node:$b)>;
 
-defm INT_PTX_ATOM_XOR_G_32 : F_ATOMIC_2<Int32Regs, ".global", ".b32", ".xor",
+defm INT_PTX_ATOM_XOR_G_32 : F_ATOMIC_2<i32, Int32Regs, ".global", ".b32", ".xor",
   atomic_load_xor_32_g, i32imm, imm>;
-defm INT_PTX_ATOM_XOR_S_32 : F_ATOMIC_2<Int32Regs, ".shared", ".b32", ".xor",
+defm INT_PTX_ATOM_XOR_S_32 : F_ATOMIC_2<i32, Int32Regs, ".shared", ".b32", ".xor",
   atomic_load_xor_32_s, i32imm, imm>;
-defm INT_PTX_ATOM_XOR_GEN_32 : F_ATOMIC_2<Int32Regs, "", ".b32", ".xor",
+defm INT_PTX_ATOM_XOR_GEN_32 : F_ATOMIC_2<i32, Int32Regs, "", ".b32", ".xor",
   atomic_load_xor_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_XOR_GEN_32_USE_G : F_ATOMIC_2<Int32Regs, ".global", ".b32",
+defm INT_PTX_ATOM_XOR_GEN_32_USE_G : F_ATOMIC_2<i32, Int32Regs, ".global", ".b32",
   ".xor", atomic_load_xor_32_gen, i32imm, imm>;
-defm INT_PTX_ATOM_XOR_G_64 : F_ATOMIC_2<Int64Regs, ".global", ".b64", ".xor",
+defm INT_PTX_ATOM_XOR_G_64 : F_ATOMIC_2<i64, Int64Regs, ".global", ".b64", ".xor",
   atomic_load_xor_64_g, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_XOR_S_64 : F_ATOMIC_2<Int64Regs, ".shared", ".b64", ".xor",
+defm INT_PTX_ATOM_XOR_S_64 : F_ATOMIC_2<i64, Int64Regs, ".shared", ".b64", ".xor",
   atomic_load_xor_64_s, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_XOR_GEN_64 : F_ATOMIC_2<Int64Regs, "", ".b64", ".xor",
+defm INT_PTX_ATOM_XOR_GEN_64 : F_ATOMIC_2<i64, Int64Regs, "", ".b64", ".xor",
   atomic_load_xor_64_gen, i64imm, imm, [hasSM<32>]>;
-defm INT_PTX_ATOM_XOR_GEN_64_USE_G : F_ATOMIC_2<Int64Regs, ".global", ".b64",
+defm INT_PTX_ATOM_XOR_GEN_64_USE_G : F_ATOMIC_2<i64, Int64Regs, ".global", ".b64",
   ".xor", atomic_load_xor_64_gen, i64imm, imm, [hasSM<32>]>;
 
 // atom_cas
@@ -1919,21 +1922,21 @@ def atomic_cmp_swap_64_s: ATOMIC_SHARED_CHK<(ops node:$a, node:$b, node:$c),
 def atomic_cmp_swap_64_gen: ATOMIC_GENERIC_CHK<(ops node:$a, node:$b, node:$c),
   (atomic_cmp_swap_64 node:$a, node:$b, node:$c)>;
 
-defm INT_PTX_ATOM_CAS_G_32 : F_ATOMIC_3<Int32Regs, ".global", ".b32", ".cas",
+defm INT_PTX_ATOM_CAS_G_32 : F_ATOMIC_3<i32, Int32Regs, ".global", ".b32", ".cas",
   atomic_cmp_swap_32_g, i32imm>;
-defm INT_PTX_ATOM_CAS_S_32 : F_ATOMIC_3<Int32Regs, ".shared", ".b32", ".cas",
+defm INT_PTX_ATOM_CAS_S_32 : F_ATOMIC_3<i32, Int32Regs, ".shared", ".b32", ".cas",
   atomic_cmp_swap_32_s, i32imm>;
-defm INT_PTX_ATOM_CAS_GEN_32 : F_ATOMIC_3<Int32Regs, "", ".b32", ".cas",
+defm INT_PTX_ATOM_CAS_GEN_32 : F_ATOMIC_3<i32, Int32Regs, "", ".b32", ".cas",
   atomic_cmp_swap_32_gen, i32imm>;
-defm INT_PTX_ATOM_CAS_GEN_32_USE_G : F_ATOMIC_3<Int32Regs, ".global", ".b32",
+defm INT_PTX_ATOM_CAS_GEN_32_USE_G : F_ATOMIC_3<i32, Int32Regs, ".global", ".b32",
   ".cas", atomic_cmp_swap_32_gen, i32imm>;
-defm INT_PTX_ATOM_CAS_G_64 : F_ATOMIC_3<Int64Regs, ".global", ".b64", ".cas",
+defm INT_PTX_ATOM_CAS_G_64 : F_ATOMIC_3<i64, Int64Regs, ".global", ".b64", ".cas",
   atomic_cmp_swap_64_g, i64imm>;
-defm INT_PTX_ATOM_CAS_S_64 : F_ATOMIC_3<Int64Regs, ".shared", ".b64", ".cas",
+defm INT_PTX_ATOM_CAS_S_64 : F_ATOMIC_3<i64, Int64Regs, ".shared", ".b64", ".cas",
   atomic_cmp_swap_64_s, i64imm>;
-defm INT_PTX_ATOM_CAS_GEN_64 : F_ATOMIC_3<Int64Regs, "", ".b64", ".cas",
+defm INT_PTX_ATOM_CAS_GEN_64 : F_ATOMIC_3<i64, Int64Regs, "", ".b64", ".cas",
   atomic_cmp_swap_64_gen, i64imm>;
-defm INT_PTX_ATOM_CAS_GEN_64_USE_G : F_ATOMIC_3<Int64Regs, ".global", ".b64",
+defm INT_PTX_ATOM_CAS_GEN_64_USE_G : F_ATOMIC_3<i64, Int64Regs, ".global", ".b64",
   ".cas", atomic_cmp_swap_64_gen, i64imm>;
 
 // Support for scoped atomic operations.  Matches
@@ -1942,76 +1945,76 @@ defm INT_PTX_ATOM_CAS_GEN_64_USE_G : F_ATOMIC_3<Int64Regs, ".global", ".b64",
 // NOTE: not all possible combinations are implemented
 //  'space' is limited to generic as it's the only one needed to support CUDA.
 //  'scope' = 'gpu' is default and is handled by regular atomic instructions.
-class ATOM23_impl<string AsmStr, NVPTXRegClass regclass, list<Predicate> Preds,
+class ATOM23_impl<string AsmStr, ValueType regT, NVPTXRegClass regclass, list<Predicate> Preds,
                   dag ins, dag Operands>
       : NVPTXInst<(outs regclass:$result), ins,
                   AsmStr,
-                  [(set regclass:$result, Operands)]>,
+                  [(set (regT regclass:$result), Operands)]>,
         Requires<Preds>;
 
 // Define instruction variants for all addressing modes.
 multiclass ATOM2P_impl<string AsmStr,  Intrinsic Intr,
-                       NVPTXRegClass regclass, Operand ImmType,
+                       ValueType regT, NVPTXRegClass regclass, Operand ImmType,
                        SDNode Imm, ValueType ImmTy,
                        list<Predicate> Preds> {
   let AddedComplexity = 1 in {
-    def : ATOM23_impl<AsmStr, regclass, Preds,
+    def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int32Regs:$src, regclass:$b),
-                      (Intr Int32Regs:$src, regclass:$b)>;
-    def : ATOM23_impl<AsmStr, regclass, Preds,
+                      (Intr (i32 Int32Regs:$src), (regT regclass:$b))>;
+    def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int64Regs:$src, regclass:$b),
-                      (Intr Int64Regs:$src, regclass:$b)>;
+                      (Intr (i64 Int64Regs:$src), (regT regclass:$b))>;
   }
   // tablegen can't infer argument types from Intrinsic (though it can
   // from Instruction) so we have to enforce specific type on
   // immediates via explicit cast to ImmTy.
-  def : ATOM23_impl<AsmStr, regclass, Preds,
+  def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                     (ins Int32Regs:$src, ImmType:$b),
-                    (Intr Int32Regs:$src, (ImmTy Imm:$b))>;
-  def : ATOM23_impl<AsmStr, regclass, Preds,
+                    (Intr (i32 Int32Regs:$src), (ImmTy Imm:$b))>;
+  def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                     (ins Int64Regs:$src, ImmType:$b),
-                    (Intr Int64Regs:$src, (ImmTy Imm:$b))>;
+                    (Intr (i64 Int64Regs:$src), (ImmTy Imm:$b))>;
 }
 
 multiclass ATOM3P_impl<string AsmStr,  Intrinsic Intr,
-                       NVPTXRegClass regclass, Operand ImmType,
-                       SDNode Imm, ValueType ImmTy,
+                       ValueType regT, NVPTXRegClass regclass,
+                       Operand ImmType, SDNode Imm, ValueType ImmTy,
                        list<Predicate> Preds> {
   // Variants for register/immediate permutations of $b and $c
   let AddedComplexity = 2 in {
-    def : ATOM23_impl<AsmStr, regclass, Preds,
+    def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int32Regs:$src, regclass:$b, regclass:$c),
-                      (Intr Int32Regs:$src, regclass:$b, regclass:$c)>;
-    def : ATOM23_impl<AsmStr, regclass, Preds,
+                      (Intr (i32 Int32Regs:$src), (regT regclass:$b), (regT regclass:$c))>;
+    def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int64Regs:$src, regclass:$b, regclass:$c),
-                      (Intr Int64Regs:$src, regclass:$b, regclass:$c)>;
+                      (Intr (i64 Int64Regs:$src), (regT regclass:$b), (regT regclass:$c))>;
   }
   let AddedComplexity = 1 in {
-    def : ATOM23_impl<AsmStr, regclass, Preds,
+    def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int32Regs:$src, ImmType:$b, regclass:$c),
-                      (Intr Int32Regs:$src, (ImmTy Imm:$b), regclass:$c)>;
-    def : ATOM23_impl<AsmStr, regclass, Preds,
+                      (Intr (i32 Int32Regs:$src), (ImmTy Imm:$b), (regT regclass:$c))>;
+    def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int64Regs:$src, ImmType:$b, regclass:$c),
-                      (Intr Int64Regs:$src, (ImmTy Imm:$b), regclass:$c)>;
-    def : ATOM23_impl<AsmStr, regclass, Preds,
+                      (Intr (i64 Int64Regs:$src), (ImmTy Imm:$b), (regT regclass:$c))>;
+    def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int32Regs:$src, regclass:$b, ImmType:$c),
-                      (Intr Int32Regs:$src, regclass:$b, (ImmTy Imm:$c))>;
-    def : ATOM23_impl<AsmStr, regclass, Preds,
+                      (Intr (i32 Int32Regs:$src), (regT regclass:$b), (ImmTy Imm:$c))>;
+    def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int64Regs:$src, regclass:$b, ImmType:$c),
-                      (Intr Int64Regs:$src, regclass:$b, (ImmTy Imm:$c))>;
+                      (Intr (i64 Int64Regs:$src), (regT regclass:$b), (ImmTy Imm:$c))>;
   }
-  def : ATOM23_impl<AsmStr, regclass, Preds,
+  def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                     (ins Int32Regs:$src, ImmType:$b, ImmType:$c),
-                    (Intr Int32Regs:$src, (ImmTy Imm:$b), (ImmTy Imm:$c))>;
-  def : ATOM23_impl<AsmStr, regclass, Preds,
+                    (Intr (i32 Int32Regs:$src), (ImmTy Imm:$b), (ImmTy Imm:$c))>;
+  def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                     (ins Int64Regs:$src, ImmType:$b, ImmType:$c),
-                    (Intr Int64Regs:$src, (ImmTy Imm:$b), (ImmTy Imm:$c))>;
+                    (Intr (i64 Int64Regs:$src), (ImmTy Imm:$b), (ImmTy Imm:$c))>;
 }
 
 // Constructs intrinsic name and instruction asm strings.
 multiclass ATOM2N_impl<string OpStr, string IntTypeStr, string TypeStr,
                        string ScopeStr, string SpaceStr,
-                       NVPTXRegClass regclass, Operand ImmType, SDNode Imm,
+                       ValueType regT, NVPTXRegClass regclass, Operand ImmType, SDNode Imm,
                        ValueType ImmTy, list<Predicate> Preds> {
   defm : ATOM2P_impl<"atom" # !if(!eq(SpaceStr, "gen"), "", "." # SpaceStr)
                             # !if(!eq(ScopeStr, "gpu"), "", "." # ScopeStr)
@@ -2021,11 +2024,11 @@ multiclass ATOM2N_impl<string OpStr, string IntTypeStr, string TypeStr,
                             "int_nvvm_atomic_" # OpStr
                             # "_" # SpaceStr # "_" # IntTypeStr
                             # !if(!empty(ScopeStr), "", "_" # ScopeStr)),
-                     regclass, ImmType, Imm, ImmTy, Preds>;
+                     regT, regclass, ImmType, Imm, ImmTy, Preds>;
 }
 multiclass ATOM3N_impl<string OpStr, string IntTypeStr, string TypeStr,
                        string ScopeStr, string SpaceStr,
-                       NVPTXRegClass regclass, Operand ImmType, SDNode Imm,
+                       ValueType regT, NVPTXRegClass regclass, Operand ImmType, SDNode Imm,
                        ValueType ImmTy, list<Predicate> Preds> {
   defm : ATOM3P_impl<"atom" # !if(!eq(SpaceStr, "gen"), "", "." # SpaceStr)
                             # !if(!eq(ScopeStr, "gpu"), "", "." # ScopeStr)
@@ -2035,93 +2038,93 @@ multiclass ATOM3N_impl<string OpStr, string IntTypeStr, string TypeStr,
                             "int_nvvm_atomic_" # OpStr
                             # "_" # SpaceStr # "_" # IntTypeStr
                             # !if(!empty(ScopeStr), "", "_" # ScopeStr)),
-                     regclass, ImmType, Imm, ImmTy, Preds>;
+                     regT, regclass, ImmType, Imm, ImmTy, Preds>;
 }
 
 // Constructs variants for different address spaces.
 // For now we only need variants for generic space pointers.
 multiclass ATOM2A_impl<string OpStr, string IntTypeStr, string TypeStr,
-                       string ScopeStr, NVPTXRegClass regclass, Operand ImmType,
+                       string ScopeStr, ValueType regT, NVPTXRegClass regclass, Operand ImmType,
                        SDNode Imm, ValueType ImmTy, list<Predicate> Preds> {
    defm _gen_ : ATOM2N_impl<OpStr, IntTypeStr, TypeStr, ScopeStr, "gen",
-                            regclass, ImmType, Imm, ImmTy, Preds>;
+                            regT, regclass, ImmType, Imm, ImmTy, Preds>;
 }
 multiclass ATOM3A_impl<string OpStr, string IntTypeStr, string TypeStr,
-                       string ScopeStr, NVPTXRegClass regclass, Operand ImmType,
+                       string ScopeStr, ValueType regT, NVPTXRegClass regclass, Operand ImmType,
                        SDNode Imm, ValueType ImmTy, list<Predicate> Preds> {
    defm _gen_ : ATOM3N_impl<OpStr, IntTypeStr, TypeStr, ScopeStr, "gen",
-                            regclass, ImmType, Imm, ImmTy, Preds>;
+                            regT, regclass, ImmType, Imm, ImmTy, Preds>;
 }
 
 // Constructs variants for different scopes of atomic op.
 multiclass ATOM2S_impl<string OpStr, string IntTypeStr, string TypeStr,
-                       NVPTXRegClass regclass, Operand ImmType, SDNode Imm,
+                       ValueType regT, NVPTXRegClass regclass, Operand ImmType, SDNode Imm,
                        ValueType ImmTy, list<Predicate> Preds> {
    // .gpu scope is default and is currently covered by existing
    // atomics w/o explicitly specified scope.
    defm _cta : ATOM2A_impl<OpStr, IntTypeStr, TypeStr, "cta",
-                           regclass, ImmType, Imm, ImmTy,
+                           regT, regclass, ImmType, Imm, ImmTy,
                            !listconcat(Preds,[hasAtomScope])>;
    defm _sys : ATOM2A_impl<OpStr, IntTypeStr, TypeStr, "sys",
-                           regclass, ImmType, Imm, ImmTy,
+                           regT, regclass, ImmType, Imm, ImmTy,
                            !listconcat(Preds,[hasAtomScope])>;
 }
 multiclass ATOM3S_impl<string OpStr, string IntTypeStr, string TypeStr,
-           NVPTXRegClass regclass, Operand ImmType, SDNode Imm, ValueType ImmTy,
+           ValueType regT, NVPTXRegClass regclass, Operand ImmType, SDNode Imm, ValueType ImmTy,
            list<Predicate> Preds> {
    // No need to define ".gpu"-scoped atomics.  They do the same thing
    // as the regular, non-scoped atomics defined elsewhere.
    defm _cta : ATOM3A_impl<OpStr, IntTypeStr, TypeStr, "cta",
-                           regclass, ImmType, Imm, ImmTy,
+                           regT, regclass, ImmType, Imm, ImmTy,
                            !listconcat(Preds,[hasAtomScope])>;
    defm _sys : ATOM3A_impl<OpStr, IntTypeStr, TypeStr, "sys",
-                           regclass, ImmType, Imm, ImmTy,
+                           regT, regclass, ImmType, Imm, ImmTy,
                            !listconcat(Preds,[hasAtomScope])>;
 }
 
 // atom.add
 multiclass ATOM2_add_impl<string OpStr> {
-   defm _s32  : ATOM2S_impl<OpStr, "i", "s32", Int32Regs, i32imm, imm, i32, []>;
-   defm _u32  : ATOM2S_impl<OpStr, "i", "u32", Int32Regs, i32imm, imm, i32, []>;
-   defm _u64  : ATOM2S_impl<OpStr, "i", "u64", Int64Regs, i64imm, imm, i64, []>;
-   defm _f32  : ATOM2S_impl<OpStr, "f", "f32", Float32Regs, f32imm, fpimm, f32,
+   defm _s32  : ATOM2S_impl<OpStr, "i", "s32", i32, Int32Regs, i32imm, imm, i32, []>;
+   defm _u32  : ATOM2S_impl<OpStr, "i", "u32", i32, Int32Regs, i32imm, imm, i32, []>;
+   defm _u64  : ATOM2S_impl<OpStr, "i", "u64", i64, Int64Regs, i64imm, imm, i64, []>;
+   defm _f32  : ATOM2S_impl<OpStr, "f", "f32", f32, Float32Regs, f32imm, fpimm, f32,
                             []>;
-   defm _f64  : ATOM2S_impl<OpStr, "f", "f64", Float64Regs, f64imm, fpimm, f64,
+   defm _f64  : ATOM2S_impl<OpStr, "f", "f64", f64, Float64Regs, f64imm, fpimm, f64,
                             [hasAtomAddF64]>;
 }
 
 // atom.{and,or,xor}
 multiclass ATOM2_bitwise_impl<string OpStr> {
-   defm _b32  : ATOM2S_impl<OpStr, "i", "b32", Int32Regs, i32imm, imm, i32, []>;
-   defm _b64  : ATOM2S_impl<OpStr, "i", "b64", Int64Regs, i64imm, imm, i64,
+   defm _b32  : ATOM2S_impl<OpStr, "i", "b32", i32, Int32Regs, i32imm, imm, i32, []>;
+   defm _b64  : ATOM2S_impl<OpStr, "i", "b64", i64, Int64Regs, i64imm, imm, i64,
                             [hasAtomBitwise64]>;
 }
 
 // atom.exch
 multiclass ATOM2_exch_impl<string OpStr> {
-   defm _b32 : ATOM2S_impl<OpStr, "i", "b32", Int32Regs, i32imm, imm, i32, []>;
-   defm _b64 : ATOM2S_impl<OpStr, "i", "b64", Int64Regs, i64imm, imm, i64, []>;
+   defm _b32 : ATOM2S_impl<OpStr, "i", "b32", i32, Int32Regs, i32imm, imm, i32, []>;
+   defm _b64 : ATOM2S_impl<OpStr, "i", "b64", i64, Int64Regs, i64imm, imm, i64, []>;
 }
 
 // atom.{min,max}
 multiclass ATOM2_minmax_impl<string OpStr> {
-   defm _s32  : ATOM2S_impl<OpStr, "i", "s32", Int32Regs, i32imm, imm, i32, []>;
-   defm _u32  : ATOM2S_impl<OpStr, "i", "u32", Int32Regs, i32imm, imm, i32, []>;
-   defm _s64  : ATOM2S_impl<OpStr, "i", "s64", Int64Regs, i64imm, imm, i64,
+   defm _s32  : ATOM2S_impl<OpStr, "i", "s32", i32, Int32Regs, i32imm, imm, i32, []>;
+   defm _u32  : ATOM2S_impl<OpStr, "i", "u32", i32, Int32Regs, i32imm, imm, i32, []>;
+   defm _s64  : ATOM2S_impl<OpStr, "i", "s64", i64, Int64Regs, i64imm, imm, i64,
                             [hasAtomMinMax64]>;
-   defm _u64  : ATOM2S_impl<OpStr, "i", "u64", Int64Regs, i64imm, imm, i64,
+   defm _u64  : ATOM2S_impl<OpStr, "i", "u64", i64, Int64Regs, i64imm, imm, i64,
                             [hasAtomMinMax64]>;
 }
 
 // atom.{inc,dec}
 multiclass ATOM2_incdec_impl<string OpStr> {
-   defm _u32  : ATOM2S_impl<OpStr, "i", "u32", Int32Regs, i32imm, imm, i32, []>;
+   defm _u32  : ATOM2S_impl<OpStr, "i", "u32", i32, Int32Regs, i32imm, imm, i32, []>;
 }
 
 // atom.cas
 multiclass ATOM3_cas_impl<string OpStr> {
-   defm _b32  : ATOM3S_impl<OpStr, "i", "b32", Int32Regs, i32imm, imm, i32, []>;
-   defm _b64  : ATOM3S_impl<OpStr, "i", "b64", Int64Regs, i64imm, imm, i64, []>;
+   defm _b32  : ATOM3S_impl<OpStr, "i", "b32", i32, Int32Regs, i32imm, imm, i32, []>;
+   defm _b64  : ATOM3S_impl<OpStr, "i", "b64", i64, Int64Regs, i64imm, imm, i64, []>;
 }
 
 defm INT_PTX_SATOM_ADD : ATOM2_add_impl<"add">;
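The scoped variants are only reachable through the target intrinsics whose
names are assembled above (int_nvvm_atomic_<op>_<space>_<type>_<scope>); a
sketch of a call site, where the overload suffix on the intrinsic name is an
assumption and may differ:

  ; assumed overload suffix (.i32.p0); check the generated declaration
  declare i32 @llvm.nvvm.atomic.add.gen.i.cta.i32.p0(ptr, i32)

  %old = call i32 @llvm.nvvm.atomic.add.gen.i.cta.i32.p0(ptr %p, i32 %v)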
diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
index b62460e8cd31f17..ed9dabf39dd7ad9 100644
--- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -58,7 +58,7 @@ foreach i = 0...31 in {
 //===----------------------------------------------------------------------===//
 def Int1Regs : NVPTXRegClass<[i1], 8, (add (sequence "P%u", 0, 4))>;
 def Int16Regs : NVPTXRegClass<[i16, f16, bf16], 16, (add (sequence "RS%u", 0, 4))>;
-def Int32Regs : NVPTXRegClass<[i32, v2f16, v2bf16], 32,
+def Int32Regs : NVPTXRegClass<[i32, v2f16, v2bf16, v2i16], 32,
                               (add (sequence "R%u", 0, 4),
                               VRFrame32, VRFrameLocal32)>;
 def Int64Regs : NVPTXRegClass<[i64], 64, (add (sequence "RL%u", 0, 4), VRFrame64, VRFrameLocal64)>;
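With v2i16 added to Int32Regs, a <2 x i16> value lives in a single 32-bit
register and is only unpacked on element access; a minimal sketch of what
this enables (it mirrors the shufflevector test added below):

  define <2 x i16> @swap_halves(<2 x i16> %v) {
    ; the vector stays in one %r register; the shuffle unpacks it with
    ; mov.b32 {lo, hi}, %r and repacks the swapped halves into a new %r
    %s = shufflevector <2 x i16> %v, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
    ret <2 x i16> %s
  }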
diff --git a/llvm/test/CodeGen/NVPTX/dag-cse.ll b/llvm/test/CodeGen/NVPTX/dag-cse.ll
index 0b21cdebd87cd2d..bbfedf42ad548ea 100644
--- a/llvm/test/CodeGen/NVPTX/dag-cse.ll
+++ b/llvm/test/CodeGen/NVPTX/dag-cse.ll
@@ -12,8 +12,8 @@
 ; CHECK: ld.global.v2.u8  {%[[B1:rs[0-9]+]], %[[B2:rs[0-9]+]]}, [a];
 ; CHECK: st.global.v2.u8  [b], {%[[B1]], %[[B2]]};
 ;
-; CHECK: ld.global.v2.u16 {%[[C1:rs[0-9]+]], %[[C2:rs[0-9]+]]}, [a];
-; CHECK: st.global.v2.u16 [c], {%[[C1]], %[[C2]]};
+; CHECK: ld.global.u32 %[[C:r[0-9]+]], [a];
+; CHECK: st.global.u32 [c], %[[C]];
 
 define void @test1() #0 {
   %1 = load <2 x i8>, ptr addrspace(1) @a, align 8
diff --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
index dba6691e30aa28e..4eb538628a8dec9 100644
--- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -993,9 +993,7 @@ define <2 x double> @test_fpext_2xdouble(<2 x half> %a) #0 {
 
 ; CHECK-LABEL: test_bitcast_2xhalf_to_2xi16(
 ; CHECK:      ld.param.u32    [[A:%r[0-9]+]], [test_bitcast_2xhalf_to_2xi16_param_0];
-; CHECK-DAG:  cvt.u16.u32     [[R0:%rs[0-9]+]], [[A]]
-; CHECK-DAG:  mov.b32         {tmp, [[R1:%rs[0-9]+]]}, [[A]];
-; CHECK:      st.param.v2.b16 [func_retval0+0], {[[R0]], [[R1]]}
+; CHECK:      st.param.b32 [func_retval0+0], [[A]]
 ; CHECK:      ret;
 define <2 x i16> @test_bitcast_2xhalf_to_2xi16(<2 x half> %a) #0 {
   %r = bitcast <2 x half> %a to <2 x i16>
@@ -1003,11 +1001,7 @@ define <2 x i16> @test_bitcast_2xhalf_to_2xi16(<2 x half> %a) #0 {
 }
 
 ; CHECK-LABEL: test_bitcast_2xi16_to_2xhalf(
-; CHECK:      ld.param.v2.u16         {[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [test_bitcast_2xi16_to_2xhalf_param_0];
-; CHECK-DAG:  cvt.u32.u16     [[R0:%r[0-9]+]], [[RS0]];
-; CHECK-DAG:  cvt.u32.u16     [[R1:%r[0-9]+]], [[RS1]];
-; CHECK-DAG:  shl.b32         [[R1H:%r[0-9]+]], [[R1]], 16;
-; CHECK-DAG:  or.b32          [[R:%r[0-9]+]], [[R0]], [[R1H]];
+; CHECK:      ld.param.u32         [[R:%r[0-9]+]], [test_bitcast_2xi16_to_2xhalf_param_0];
 ; CHECK:      st.param.b32    [func_retval0+0], [[R]];
 ; CHECK:      ret;
 define <2 x half> @test_bitcast_2xi16_to_2xhalf(<2 x i16> %a) #0 {
diff --git a/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll
new file mode 100644
index 000000000000000..265e2b99af2156f
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll
@@ -0,0 +1,533 @@
+; ## Support i16x2 instructions
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_90 -mattr=+ptx80 -asm-verbose=false \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK,CHECK-I16x2 %s
+; RUN: %if ptxas %{                                                           \
+; RUN:   llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_90 -asm-verbose=false \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN:   | %ptxas-verify -arch=sm_90                                          \
+; RUN: %}
+; ## No support for i16x2 instructions
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all --nvptx-no-f16-math \
+; RUN:           -verify-machineinstrs \
+; RUN: | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK,CHECK-NOI16x2 %s
+; RUN: %if ptxas %{                                                           \
+; RUN:   llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all --nvptx-no-f16-math   \
+; RUN:           -verify-machineinstrs                                        \
+; RUN:   | %ptxas-verify -arch=sm_53                                          \
+; RUN: %}
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+; CHECK-LABEL: test_ret_const(
+; CHECK:     mov.u32         [[R:%r[0-9]+]], 131073;
+; CHECK:     st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_ret_const() #0 {
+  ret <2 x i16> <i16 1, i16 2>
+}
+
+; CHECK-LABEL: test_extract_0(
+; CHECK:      ld.param.u32   [[A:%r[0-9]+]], [test_extract_0_param_0];
+; CHECK:      mov.b32        {[[RS:%rs[0-9]+]], tmp}, [[A]];
+; CHECK:      cvt.u32.u16    [[R:%r[0-9]+]], [[RS]];
+; CHECK:      st.param.b32    [func_retval0+0], [[R]];
+; CHECK:      ret;
+define i16 @test_extract_0(<2 x i16> %a) #0 {
+  %e = extractelement <2 x i16> %a, i32 0
+  ret i16 %e
+}
+
+; CHECK-LABEL: test_extract_1(
+; CHECK:      ld.param.u32   [[A:%r[0-9]+]], [test_extract_1_param_0];
+; CHECK:      mov.b32        {tmp, [[RS:%rs[0-9]+]]}, [[A]];
+; CHECK:      cvt.u32.u16    [[R:%r[0-9]+]], [[RS]];
+; CHECK:      st.param.b32    [func_retval0+0], [[R]];
+; CHECK:      ret;
+define i16 @test_extract_1(<2 x i16> %a) #0 {
+  %e = extractelement <2 x i16> %a, i32 1
+  ret i16 %e
+}
+
+; CHECK-LABEL: test_extract_i(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_extract_i_param_0];
+; CHECK-DAG:  ld.param.u64    [[IDX:%rd[0-9]+]], [test_extract_i_param_1];
+; CHECK-DAG:  setp.eq.s64     [[PRED:%p[0-9]+]], [[IDX]], 0;
+; CHECK-DAG:  mov.b32         {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [[A]];
+; CHECK:      selp.b16        [[RS:%rs[0-9]+]], [[E0]], [[E1]], [[PRED]];
+; CHECK:      cvt.u32.u16     [[R:%r[0-9]+]], [[RS]];
+; CHECK:      st.param.b32    [func_retval0+0], [[R]];
+; CHECK:      ret;
+define i16 @test_extract_i(<2 x i16> %a, i64 %idx) #0 {
+  %e = extractelement <2 x i16> %a, i64 %idx
+  ret i16 %e
+}
+
+; CHECK-LABEL: test_add(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_add_param_0];
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_add_param_1];
+;
+; CHECK-I16x2-NEXT:  add.s16x2   [[R:%r[0-9]+]], [[A]], [[B]];
+;
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
+;	CHECK-NOI16x2-DAG: add.s16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG: add.s16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
+;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
+;
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_add(<2 x i16> %a, <2 x i16> %b) #0 {
+  %r = add <2 x i16> %a, %b
+  ret <2 x i16> %r
+}
+
+; Check that we can lower add with immediate arguments.
+; CHECK-LABEL: test_add_imm_0(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_add_imm_0_param_0];
+;
+; CHECK-I16x2:        mov.u32        [[I:%r[0-9]+]], 131073;
+; CHECK-I16x2:        add.s16x2      [[R:%r[0-9]+]], [[A]], [[I]];
+;
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
+;	CHECK-NOI16x2-DAG: add.s16 	[[RS2:%rs[0-9]+]], [[RS0]], 1;
+;	CHECK-NOI16x2-DAG: add.s16 	[[RS3:%rs[0-9]+]], [[RS1]], 2;
+;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS2]], [[RS3]]};
+;
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_add_imm_0(<2 x i16> %a) #0 {
+  %r = add <2 x i16> <i16 1, i16 2>, %a
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_add_imm_1(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_add_imm_1_param_0];
+;
+; CHECK-I16x2:        mov.u32        [[I:%r[0-9]+]], 131073;
+; CHECK-I16x2:        add.s16x2      [[R:%r[0-9]+]], [[A]], [[I]];
+;
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
+;	CHECK-NOI16x2-DAG: add.s16 	[[RS2:%rs[0-9]+]], [[RS0]], 1;
+;	CHECK-NOI16x2-DAG: add.s16 	[[RS3:%rs[0-9]+]], [[RS1]], 2;
+;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS2]], [[RS3]]};
+;
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_add_imm_1(<2 x i16> %a) #0 {
+  %r = add <2 x i16> %a, <i16 1, i16 2>
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_sub(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_sub_param_0];
+;
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_sub_param_1];
+; CHECK-I16x2:   sub.s16x2   [[R:%r[0-9]+]], [[A]], [[B]];
+;
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
+;	CHECK-NOI16x2-DAG: sub.s16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG: sub.s16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
+;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
+;
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_sub(<2 x i16> %a, <2 x i16> %b) #0 {
+  %r = sub <2 x i16> %a, %b
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_smax(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_smax_param_0];
+;
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_smax_param_1];
+; CHECK-I16x2:   max.s16x2   [[R:%r[0-9]+]], [[A]], [[B]];
+;
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
+;	CHECK-NOI16x2-DAG:	setp.gt.s16 	[[P0:%p[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG:	setp.gt.s16 	[[P1:%p[0-9]+]], [[RS1]], [[RS3]];
+;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]], [[P0]];
+;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]], [[P1]];
+;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
+;
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_smax(<2 x i16> %a, <2 x i16> %b) #0 {
+  %cmp = icmp sgt <2 x i16> %a, %b
+  %r = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_umax(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_umax_param_0];
+;
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_umax_param_1];
+; CHECK-I16x2:   max.u16x2   [[R:%r[0-9]+]], [[A]], [[B]];
+;
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
+;	CHECK-NOI16x2-DAG:	setp.gt.u16 	[[P0:%p[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG:	setp.gt.u16 	[[P1:%p[0-9]+]], [[RS1]], [[RS3]];
+;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]], [[P0]];
+;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]], [[P1]];
+;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
+;
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_umax(<2 x i16> %a, <2 x i16> %b) #0 {
+  %cmp = icmp ugt <2 x i16> %a, %b
+  %r = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_smin(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_smin_param_0];
+;
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_smin_param_1];
+; CHECK-I16x2:   min.s16x2   [[R:%r[0-9]+]], [[A]], [[B]];
+;
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
+;	CHECK-NOI16x2-DAG:	setp.le.s16 	[[P0:%p[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG:	setp.le.s16 	[[P1:%p[0-9]+]], [[RS1]], [[RS3]];
+;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]], [[P0]];
+;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]], [[P1]];
+;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
+;
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_smin(<2 x i16> %a, <2 x i16> %b) #0 {
+  %cmp = icmp sle <2 x i16> %a, %b
+  %r = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_umin(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_umin_param_0];
+;
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_umin_param_1];
+; CHECK-I16x2:   min.u16x2   [[R:%r[0-9]+]], [[A]], [[B]];
+;
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
+;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
+;	CHECK-NOI16x2-DAG:	setp.le.u16 	[[P0:%p[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG:	setp.le.u16 	[[P1:%p[0-9]+]], [[RS1]], [[RS3]];
+;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]], [[P0]];
+;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]], [[P1]];
+;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
+;
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_umin(<2 x i16> %a, <2 x i16> %b) #0 {
+  %cmp = icmp ule <2 x i16> %a, %b
+  %r = select <2 x i1> %cmp, <2 x i16> %a, <2 x i16> %b
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_mul(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_mul_param_0];
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_mul_param_1];
+;
+;	CHECK-DAG: mov.b32 	    {[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
+;	CHECK-DAG: mov.b32 	    {[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
+;	CHECK-DAG: mul.lo.s16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-DAG: mul.lo.s16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
+;	CHECK-DAG: mov.b32 	    [[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
+;
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_mul(<2 x i16> %a, <2 x i16> %b) #0 {
+  %r = mul <2 x i16> %a, %b
+  ret <2 x i16> %r
+}
+
+
+; CHECK-LABEL: .func test_ldst_v2i16(
+; CHECK-DAG:    ld.param.u64    [[A:%rd[0-9]+]], [test_ldst_v2i16_param_0];
+; CHECK-DAG:    ld.param.u64    [[B:%rd[0-9]+]], [test_ldst_v2i16_param_1];
+; CHECK-DAG:    ld.u32          [[E:%r[0-9]+]], [[[A]]];
+; CHECK-DAG:    st.u32          [[[B]]], [[E]];
+; CHECK:        ret;
+define void @test_ldst_v2i16(ptr %a, ptr %b) {
+  %t1 = load <2 x i16>, ptr %a
+  store <2 x i16> %t1, ptr %b, align 16
+  ret void
+}
+
+; CHECK-LABEL: .func test_ldst_v3i16(
+; CHECK-DAG:    ld.param.u64    %[[A:rd[0-9]+]], [test_ldst_v3i16_param_0];
+; CHECK-DAG:    ld.param.u64    %[[B:rd[0-9]+]], [test_ldst_v3i16_param_1];
+; -- v3 is inconvenient to capture as it's lowered as ld.b64 plus a fair
+;    number of bit-shifting instructions that may change at LLVM's whim.
+;    So we only verify that the correct number of writes is issued at the
+;    correct offsets, without checking the values written.
+; CHECK-DAG:    ld.u64
+; CHECK-DAG:    st.u32          [%[[B]]],
+; CHECK-DAG:    st.u16          [%[[B]]+4],
+; CHECK:        ret;
+define void @test_ldst_v3i16(ptr %a, ptr %b) {
+  %t1 = load <3 x i16>, ptr %a
+  store <3 x i16> %t1, ptr %b, align 16
+  ret void
+}
+
+; CHECK-LABEL: .func test_ldst_v4i16(
+; CHECK-DAG:    ld.param.u64    %[[A:rd[0-9]+]], [test_ldst_v4i16_param_0];
+; CHECK-DAG:    ld.param.u64    %[[B:rd[0-9]+]], [test_ldst_v4i16_param_1];
+; CHECK-DAG:    ld.v4.u16       {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [%[[A]]];
+; CHECK-DAG:    st.v4.u16       [%[[B]]], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK:        ret;
+define void @test_ldst_v4i16(ptr %a, ptr %b) {
+  %t1 = load <4 x i16>, ptr %a
+  store <4 x i16> %t1, ptr %b, align 16
+  ret void
+}
+
+; CHECK-LABEL: .func test_ldst_v8i16(
+; CHECK-DAG:    ld.param.u64    %[[A:rd[0-9]+]], [test_ldst_v8i16_param_0];
+; CHECK-DAG:    ld.param.u64    %[[B:rd[0-9]+]], [test_ldst_v8i16_param_1];
+; CHECK-DAG:    ld.v4.b32       {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]], [[E2:%r[0-9]+]], [[E3:%r[0-9]+]]}, [%[[A]]];
+; CHECK-DAG:    st.v4.b32       [%[[B]]], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK:        ret;
+define void @test_ldst_v8i16(ptr %a, ptr %b) {
+  %t1 = load <8 x i16>, ptr %a
+  store <8 x i16> %t1, ptr %b, align 16
+  ret void
+}
+
+declare <2 x i16> @test_callee(<2 x i16> %a, <2 x i16> %b) #0
+
+; CHECK-LABEL: test_call(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_call_param_0];
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_call_param_1];
+; CHECK:      {
+; CHECK-DAG:  .param .align 4 .b8 param0[4];
+; CHECK-DAG:  .param .align 4 .b8 param1[4];
+; CHECK-DAG:  st.param.b32    [param0+0], [[A]];
+; CHECK-DAG:  st.param.b32    [param1+0], [[B]];
+; CHECK-DAG:  .param .align 4 .b8 retval0[4];
+; CHECK:      call.uni (retval0),
+; CHECK-NEXT:        test_callee,
+; CHECK:      );
+; CHECK-NEXT: ld.param.b32    [[R:%r[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_call(<2 x i16> %a, <2 x i16> %b) #0 {
+  %r = call <2 x i16> @test_callee(<2 x i16> %a, <2 x i16> %b)
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_call_flipped(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_call_flipped_param_0];
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_call_flipped_param_1];
+; CHECK:      {
+; CHECK-DAG:  .param .align 4 .b8 param0[4];
+; CHECK-DAG:  .param .align 4 .b8 param1[4];
+; CHECK-DAG:  st.param.b32    [param0+0], [[B]];
+; CHECK-DAG:  st.param.b32    [param1+0], [[A]];
+; CHECK-DAG:  .param .align 4 .b8 retval0[4];
+; CHECK:      call.uni (retval0),
+; CHECK-NEXT:        test_callee,
+; CHECK:      );
+; CHECK-NEXT: ld.param.b32    [[R:%r[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_call_flipped(<2 x i16> %a, <2 x i16> %b) #0 {
+  %r = call <2 x i16> @test_callee(<2 x i16> %b, <2 x i16> %a)
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_tailcall_flipped(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_tailcall_flipped_param_0];
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_tailcall_flipped_param_1];
+; CHECK:      {
+; CHECK-DAG:  .param .align 4 .b8 param0[4];
+; CHECK-DAG:  .param .align 4 .b8 param1[4];
+; CHECK-DAG:  st.param.b32    [param0+0], [[B]];
+; CHECK-DAG:  st.param.b32    [param1+0], [[A]];
+; CHECK-DAG:  .param .align 4 .b8 retval0[4];
+; CHECK:      call.uni (retval0),
+; CHECK-NEXT:        test_callee,
+; CHECK:      );
+; CHECK-NEXT: ld.param.b32    [[R:%r[0-9]+]], [retval0+0];
+; CHECK-NEXT: }
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_tailcall_flipped(<2 x i16> %a, <2 x i16> %b) #0 {
+  %r = tail call <2 x i16> @test_callee(<2 x i16> %b, <2 x i16> %a)
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_select(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_select_param_0];
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_select_param_1];
+; CHECK-DAG:  ld.param.u8     [[C:%rs[0-9]+]], [test_select_param_2]
+; CHECK-DAG:  setp.eq.b16     [[PRED:%p[0-9]+]], %rs{{.*}}, 1;
+; CHECK-NEXT: selp.b32        [[R:%r[0-9]+]], [[A]], [[B]], [[PRED]];
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_select(<2 x i16> %a, <2 x i16> %b, i1 zeroext %c) #0 {
+  %r = select i1 %c, <2 x i16> %a, <2 x i16> %b
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_select_cc(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_select_cc_param_0];
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_select_cc_param_1];
+; CHECK-DAG:  ld.param.u32    [[C:%r[0-9]+]], [test_select_cc_param_2];
+; CHECK-DAG:  ld.param.u32    [[D:%r[0-9]+]], [test_select_cc_param_3];
+; CHECK-DAG:  mov.b32        {[[C0:%rs[0-9]+]], [[C1:%rs[0-9]+]]}, [[C]]
+; CHECK-DAG:  mov.b32        {[[D0:%rs[0-9]+]], [[D1:%rs[0-9]+]]}, [[D]]
+; CHECK-DAG:  setp.ne.s16    [[P0:%p[0-9]+]], [[C0]], [[D0]]
+; CHECK-DAG:  setp.ne.s16    [[P1:%p[0-9]+]], [[C1]], [[D1]]
+; CHECK-DAG:  mov.b32         {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
+; CHECK-DAG:  mov.b32         {[[B0:%rs[0-9]+]], [[B1:%rs[0-9]+]]}, [[B]]
+; CHECK-DAG:  selp.b16        [[R0:%rs[0-9]+]], [[A0]], [[B0]], [[P0]];
+; CHECK-DAG:  selp.b16        [[R1:%rs[0-9]+]], [[A1]], [[B1]], [[P1]];
+; CHECK:      mov.b32         [[R:%r[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_select_cc(<2 x i16> %a, <2 x i16> %b, <2 x i16> %c, <2 x i16> %d) #0 {
+  %cc = icmp ne <2 x i16> %c, %d
+  %r = select <2 x i1> %cc, <2 x i16> %a, <2 x i16> %b
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_select_cc_i32_i16(
+; CHECK-DAG:  ld.param.v2.u32    {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_select_cc_i32_i16_param_0];
+; CHECK-DAG:  ld.param.v2.u32    {[[B0:%r[0-9]+]], [[B1:%r[0-9]+]]}, [test_select_cc_i32_i16_param_1];
+; CHECK-DAG:  ld.param.u32    [[C:%r[0-9]+]], [test_select_cc_i32_i16_param_2];
+; CHECK-DAG:  ld.param.u32    [[D:%r[0-9]+]], [test_select_cc_i32_i16_param_3];
+; CHECK-DAG: mov.b32         {[[C0:%rs[0-9]+]], [[C1:%rs[0-9]+]]}, [[C]]
+; CHECK-DAG: mov.b32         {[[D0:%rs[0-9]+]], [[D1:%rs[0-9]+]]}, [[D]]
+; CHECK-DAG: setp.ne.s16    [[P0:%p[0-9]+]], [[C0]], [[D0]]
+; CHECK-DAG: setp.ne.s16    [[P1:%p[0-9]+]], [[C1]], [[D1]]
+; CHECK-DAG: selp.b32        [[R0:%r[0-9]+]], [[A0]], [[B0]], [[P0]];
+; CHECK-DAG: selp.b32        [[R1:%r[0-9]+]], [[A1]], [[B1]], [[P1]];
+; CHECK-NEXT: st.param.v2.b32    [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: ret;
+define <2 x i32> @test_select_cc_i32_i16(<2 x i32> %a, <2 x i32> %b,
+                                           <2 x i16> %c, <2 x i16> %d) #0 {
+  %cc = icmp ne <2 x i16> %c, %d
+  %r = select <2 x i1> %cc, <2 x i32> %a, <2 x i32> %b
+  ret <2 x i32> %r
+}
+
+; CHECK-LABEL: test_select_cc_i16_i32(
+; CHECK-DAG:  ld.param.u32    [[A:%r[0-9]+]], [test_select_cc_i16_i32_param_0];
+; CHECK-DAG:  ld.param.u32    [[B:%r[0-9]+]], [test_select_cc_i16_i32_param_1];
+; CHECK-DAG:  ld.param.v2.u32 {[[C0:%r[0-9]+]], [[C1:%r[0-9]+]]}, [test_select_cc_i16_i32_param_2];
+; CHECK-DAG:  ld.param.v2.u32 {[[D0:%r[0-9]+]], [[D1:%r[0-9]+]]}, [test_select_cc_i16_i32_param_3];
+; CHECK-DAG:  setp.ne.s32    [[P0:%p[0-9]+]], [[C0]], [[D0]]
+; CHECK-DAG:  setp.ne.s32    [[P1:%p[0-9]+]], [[C1]], [[D1]]
+; CHECK-DAG:  mov.b32         {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
+; CHECK-DAG:  mov.b32         {[[B0:%rs[0-9]+]], [[B1:%rs[0-9]+]]}, [[B]]
+; CHECK-DAG:  selp.b16        [[R0:%rs[0-9]+]], [[A0]], [[B0]], [[P0]];
+; CHECK-DAG:  selp.b16        [[R1:%rs[0-9]+]], [[A1]], [[B1]], [[P1]];
+; CHECK:      mov.b32         [[R:%r[0-9]+]], {[[R0]], [[R1]]}
+; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
+; CHECK-NEXT: ret;
+define <2 x i16> @test_select_cc_i16_i32(<2 x i16> %a, <2 x i16> %b,
+                                          <2 x i32> %c, <2 x i32> %d) #0 {
+  %cc = icmp ne <2 x i32> %c, %d
+  %r = select <2 x i1> %cc, <2 x i16> %a, <2 x i16> %b
+  ret <2 x i16> %r
+}
+
+
+; CHECK-LABEL: test_trunc_2xi32(
+; CHECK:      ld.param.v2.u32 {[[A0:%r[0-9]+]], [[A1:%r[0-9]+]]}, [test_trunc_2xi32_param_0];
+; CHECK-DAG:  cvt.u16.u32  [[R0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG:  cvt.u16.u32  [[R1:%rs[0-9]+]], [[A1]];
+; CHECK:      mov.b32         [[R:%r[0-9]+]], {[[R0]], [[R1]]}
+; CHECK:      st.param.b32    [func_retval0+0], [[R]];
+; CHECK:      ret;
+define <2 x i16> @test_trunc_2xi32(<2 x i32> %a) #0 {
+  %r = trunc <2 x i32> %a to <2 x i16>
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_trunc_2xi64(
+; CHECK:      ld.param.v2.u64 {[[A0:%rd[0-9]+]], [[A1:%rd[0-9]+]]}, [test_trunc_2xi64_param_0];
+; CHECK-DAG:  cvt.u16.u64  [[R0:%rs[0-9]+]], [[A0]];
+; CHECK-DAG:  cvt.u16.u64  [[R1:%rs[0-9]+]], [[A1]];
+; CHECK:      mov.b32         [[R:%r[0-9]+]], {[[R0]], [[R1]]}
+; CHECK:      st.param.b32    [func_retval0+0], [[R]];
+; CHECK:      ret;
+define <2 x i16> @test_trunc_2xi64(<2 x i64> %a) #0 {
+  %r = trunc <2 x i64> %a to <2 x i16>
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_zext_2xi32(
+; CHECK:      ld.param.u32    [[A:%r[0-9]+]], [test_zext_2xi32_param_0];
+; CHECK:      mov.b32         {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
+; CHECK-DAG:  cvt.u32.u16     [[R0:%r[0-9]+]], [[A0]];
+; CHECK-DAG:  cvt.u32.u16     [[R1:%r[0-9]+]], [[A1]];
+; CHECK-NEXT: st.param.v2.b32 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK:      ret;
+define <2 x i32> @test_zext_2xi32(<2 x i16> %a) #0 {
+  %r = zext <2 x i16> %a to <2 x i32>
+  ret <2 x i32> %r
+}
+
+; CHECK-LABEL: test_zext_2xi64(
+; CHECK:      ld.param.u32    [[A:%r[0-9]+]], [test_zext_2xi64_param_0];
+; CHECK:      mov.b32         {[[A0:%rs[0-9]+]], [[A1:%rs[0-9]+]]}, [[A]]
+; CHECK-DAG:  cvt.u64.u16     [[R0:%rd[0-9]+]], [[A0]];
+; CHECK-DAG:  cvt.u64.u16     [[R1:%rd[0-9]+]], [[A1]];
+; CHECK-NEXT: st.param.v2.b64 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK:      ret;
+define <2 x i64> @test_zext_2xi64(<2 x i16> %a) #0 {
+  %r = zext <2 x i16> %a to <2 x i64>
+  ret <2 x i64> %r
+}
+
+; CHECK-LABEL: test_bitcast_i32_to_2xi16(
+; CHECK: ld.param.u32 	[[R:%r[0-9]+]], [test_bitcast_i32_to_2xi16_param_0];
+; CHECK: st.param.b32 	[func_retval0+0], [[R]];
+; CHECK: ret;
+define <2 x i16> @test_bitcast_i32_to_2xi16(i32 %a) #0 {
+  %r = bitcast i32 %a to <2 x i16>
+  ret <2 x i16> %r
+}
+
+; CHECK-LABEL: test_bitcast_2xi16_to_i32(
+; CHECK: ld.param.u32 	[[R:%r[0-9]+]], [test_bitcast_2xi16_to_i32_param_0];
+; CHECK: st.param.b32 	[func_retval0+0], [[R]];
+; CHECK: ret;
+define i32 @test_bitcast_2xi16_to_i32(<2 x i16> %a) #0 {
+  %r = bitcast <2 x i16> %a to i32
+  ret i32 %r
+}
+
+; CHECK-LABEL: test_shufflevector(
+; CHECK:	ld.param.u32 	[[R:%r[0-9]+]], [test_shufflevector_param_0];
+; CHECK:	mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[R]];
+; CHECK:	mov.b32 	[[R1:%r[0-9]+]], {[[RS1]], [[RS0]]};
+; CHECK:	st.param.b32 	[func_retval0+0], [[R1]];
+; CHECK:	ret;
+define <2 x i16> @test_shufflevector(<2 x i16> %a) #0 {
+  %s = shufflevector <2 x i16> %a, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
+  ret <2 x i16> %s
+}
+
+; CHECK-LABEL: test_insertelement(
+; CHECK:  ld.param.u16 	[[B:%rs[0-9]+]], [test_insertelement_param_1];
+; CHECK:	ld.param.u32 	[[A:%r[0-9]+]], [test_insertelement_param_0];
+; CHECK:	{ .reg .b16 tmp; mov.b32 {[[R0:%rs[0-9]+]], tmp}, [[A]]; }
+; CHECK:	mov.b32 	[[R1:%r[0-9]+]], {[[R0]], [[B]]};
+; CHECK:	st.param.b32 	[func_retval0+0], [[R1]];
+; CHECK:	ret;
+define <2 x i16> @test_insertelement(<2 x i16> %a, i16 %x) #0 {
+  %i = insertelement <2 x i16> %a, i16 %x, i64 1
+  ret <2 x i16> %i
+}
+
+attributes #0 = { nounwind }
diff --git a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
index aa9a1280abcd6a4..9012339fb6b1e20 100644
--- a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
+++ b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
@@ -80,9 +80,9 @@ define void @foo7(ptr noalias readonly %from, ptr %to) {
 }
 
 ; SM20-LABEL: .visible .entry foo8(
-; SM20: ld.global.v2.u16
+; SM20: ld.global.u32
 ; SM35-LABEL: .visible .entry foo8(
-; SM35: ld.global.nc.v2.u16
+; SM35: ld.global.nc.u32
 define void @foo8(ptr noalias readonly %from, ptr %to) {
   %1 = load <2 x i16>, ptr %from
   store <2 x i16> %1, ptr %to
diff --git a/llvm/test/CodeGen/NVPTX/param-load-store.ll b/llvm/test/CodeGen/NVPTX/param-load-store.ll
index 313a0915d2030f7..2d87271e30ae0b7 100644
--- a/llvm/test/CodeGen/NVPTX/param-load-store.ll
+++ b/llvm/test/CodeGen/NVPTX/param-load-store.ll
@@ -326,7 +326,8 @@ define signext i16 @test_i16s(i16 signext %a) {
 ; CHECK-LABEL: test_v3i16(
 ; CHECK-NEXT: .param .align 8 .b8 test_v3i16_param_0[8]
 ; CHECK-DAG:  ld.param.u16    [[E2:%rs[0-9]+]], [test_v3i16_param_0+4];
-; CHECK-DAG:  ld.param.v2.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i16_param_0];
+; CHECK-DAG:  ld.param.u32    [[R:%r[0-9]+]], [test_v3i16_param_0];
+; CHECK-DAG:  mov.b32 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [[R]];
 ; CHECK:      .param .align 8 .b8 param0[8];
 ; CHECK:      st.param.v2.b16 [param0+0], {[[E0]], [[E1]]};
 ; CHECK:      st.param.b16    [param0+4], [[E2]];
@@ -346,14 +347,14 @@ define <3 x i16> @test_v3i16(<3 x i16> %a) {
 ; CHECK: .func  (.param .align 8 .b8 func_retval0[8])
 ; CHECK-LABEL: test_v4i16(
 ; CHECK-NEXT: .param .align 8 .b8 test_v4i16_param_0[8]
-; CHECK:      ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i16_param_0]
+; CHECK:      ld.param.v2.u32 {[[E0:%r[0-9]+]], [[E1:%r[0-9]+]]}, [test_v4i16_param_0]
 ; CHECK:      .param .align 8 .b8 param0[8];
-; CHECK:      st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK:      st.param.v2.b32 [param0+0], {[[E0]], [[E1]]};
 ; CHECK:      .param .align 8 .b8 retval0[8];
 ; CHECK:      call.uni (retval0),
 ; CHECK-NEXT: test_v4i16,
-; CHECK:      ld.param.v4.b16 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
-; CHECK:      st.param.v4.b16 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK:      ld.param.v2.b32 {[[RE0:%r[0-9]+]], [[RE1:%r[0-9]+]]}, [retval0+0];
+; CHECK:      st.param.v2.b32 [func_retval0+0], {[[RE0]], [[RE1]]}
 ; CHECK-NEXT: ret;
 define <4 x i16> @test_v4i16(<4 x i16> %a) {
        %r = tail call <4 x i16> @test_v4i16(<4 x i16> %a);
@@ -365,6 +366,10 @@ define <4 x i16> @test_v4i16(<4 x i16> %a) {
 ; CHECK-NEXT: .param .align 16 .b8 test_v5i16_param_0[16]
 ; CHECK-DAG:  ld.param.u16    [[E4:%rs[0-9]+]], [test_v5i16_param_0+8];
 ; CHECK-DAG:  ld.param.v4.u16 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i16_param_0]
+; CHECK-DAG:	mov.b32 	[[R0:%r[0-9]+]], {[[E0]], [[E1]]};
+; CHECK-DAG:	mov.b32 	{[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [[R0]];
+; CHECK-DAG:	mov.b32 	[[R1:%r[0-9]+]], {[[E2]], [[E3]]};
+; CHECK-DAG:	mov.b32 	{[[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [[R1]];
 ; CHECK:      .param .align 16 .b8 param0[16];
 ; CHECK-DAG:  st.param.v4.b16 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
 ; CHECK-DAG:  st.param.b16    [param0+8], [[E4]];
diff --git a/llvm/test/CodeGen/NVPTX/vec-param-load.ll b/llvm/test/CodeGen/NVPTX/vec-param-load.ll
index f8b16c1feab5e09..f4f5c26be3474bf 100644
--- a/llvm/test/CodeGen/NVPTX/vec-param-load.ll
+++ b/llvm/test/CodeGen/NVPTX/vec-param-load.ll
@@ -70,14 +70,10 @@ define <8 x i64> @test_v8i64(<8 x i64> %a) {
 
 define <16 x i16> @test_v16i16(<16 x i16> %a) {
 ; CHECK-LABEL: test_v16i16(
-; CHECK-DAG: ld.param.v4.u16     {[[V_12_15:(%rs[0-9]+[, ]*){4}]]}, [test_v16i16_param_0+24];
-; CHECK-DAG: ld.param.v4.u16     {[[V_8_11:(%rs[0-9]+[, ]*){4}]]}, [test_v16i16_param_0+16];
-; CHECK-DAG: ld.param.v4.u16     {[[V_4_7:(%rs[0-9]+[, ]*){4}]]}, [test_v16i16_param_0+8];
-; CHECK-DAG: ld.param.v4.u16     {[[V_0_3:(%rs[0-9]+[, ]*){4}]]}, [test_v16i16_param_0];
-; CHECK-DAG: st.param.v4.b16     [func_retval0+0], {[[V_0_3]]}
-; CHECK-DAG: st.param.v4.b16     [func_retval0+8], {[[V_4_7]]}
-; CHECK-DAG: st.param.v4.b16     [func_retval0+16], {[[V_8_11]]}
-; CHECK-DAG: st.param.v4.b16     [func_retval0+24], {[[V_12_15]]}
+; CHECK-DAG: ld.param.v4.u32     {[[V_8_15:(%r[0-9]+[, ]*){4}]]}, [test_v16i16_param_0+16];
+; CHECK-DAG: ld.param.v4.u32     {[[V_0_7:(%r[0-9]+[, ]*){4}]]}, [test_v16i16_param_0];
+; CHECK-DAG: st.param.v4.b32     [func_retval0+0], {[[V_0_7]]}
+; CHECK-DAG: st.param.v4.b32     [func_retval0+16], {[[V_8_15]]}
 ; CHECK: ret;
   ret <16 x i16> %a
 }
diff --git a/llvm/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic-inseltpoison.ll
deleted file mode 100644
index aeb4aa8078aa2e7..000000000000000
--- a/llvm/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic-inseltpoison.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=slp-vectorizer -o - -S -slp-threshold=-1000 | FileCheck %s
-
-target datalayout = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"
-target triple = "nvptx--nvidiacl"
-
-; CTLZ cannot be vectorized currently because the second argument is a scalar
-; for both the scalar and vector forms of the intrinsic. In the future it
-; should be possible to vectorize such functions.
-; Test causes an assert if LLVM tries to vectorize CTLZ.
-
-define <2 x i8> @cltz_test(<2 x i8> %x) #0 {
-; CHECK-LABEL: @cltz_test(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x i8> [[X:%.*]], i32 0
-; CHECK-NEXT:    [[CALL_I:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP0]], i1 false)
-; CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <2 x i8> poison, i8 [[CALL_I]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i8> [[X]], i32 1
-; CHECK-NEXT:    [[CALL_I4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP1]], i1 false)
-; CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <2 x i8> [[VECINIT]], i8 [[CALL_I4]], i32 1
-; CHECK-NEXT:    ret <2 x i8> [[VECINIT2]]
-;
-entry:
-  %0 = extractelement <2 x i8> %x, i32 0
-  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
-  %vecinit = insertelement <2 x i8> poison, i8 %call.i, i32 0
-  %1 = extractelement <2 x i8> %x, i32 1
-  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
-  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
-  ret <2 x i8> %vecinit2
-}
-
-define <2 x i8> @cltz_test2(<2 x i8> %x) #1 {
-; CHECK-LABEL: @cltz_test2(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x i8> [[X:%.*]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i8> [[X]], i32 1
-; CHECK-NEXT:    [[CALL_I:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP0]], i1 false)
-; CHECK-NEXT:    [[CALL_I4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP1]], i1 false)
-; CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <2 x i8> poison, i8 [[CALL_I]], i32 0
-; CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <2 x i8> [[VECINIT]], i8 [[CALL_I4]], i32 1
-; CHECK-NEXT:    ret <2 x i8> [[VECINIT2]]
-;
-entry:
-  %0 = extractelement <2 x i8> %x, i32 0
-  %1 = extractelement <2 x i8> %x, i32 1
-  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
-  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
-  %vecinit = insertelement <2 x i8> poison, i8 %call.i, i32 0
-  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
-  ret <2 x i8> %vecinit2
-}
-
-declare i8 @llvm.ctlz.i8(i8, i1) #3
-
-attributes #0 = { alwaysinline nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
diff --git a/llvm/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic.ll b/llvm/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic.ll
deleted file mode 100644
index 4f92fa7c3d0f42a..000000000000000
--- a/llvm/test/Transforms/SLPVectorizer/NVPTX/non-vectorizable-intrinsic.ll
+++ /dev/null
@@ -1,57 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -passes=slp-vectorizer -o - -S -slp-threshold=-1000 | FileCheck %s
-
-target datalayout = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"
-target triple = "nvptx--nvidiacl"
-
-; CTLZ cannot be vectorized currently because the second argument is a scalar
-; for both the scalar and vector forms of the intrinsic. In the future it
-; should be possible to vectorize such functions.
-; Test causes an assert if LLVM tries to vectorize CTLZ.
-
-define <2 x i8> @cltz_test(<2 x i8> %x) #0 {
-; CHECK-LABEL: @cltz_test(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x i8> [[X:%.*]], i32 0
-; CHECK-NEXT:    [[CALL_I:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP0]], i1 false)
-; CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <2 x i8> undef, i8 [[CALL_I]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i8> [[X]], i32 1
-; CHECK-NEXT:    [[CALL_I4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP1]], i1 false)
-; CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <2 x i8> [[VECINIT]], i8 [[CALL_I4]], i32 1
-; CHECK-NEXT:    ret <2 x i8> [[VECINIT2]]
-;
-entry:
-  %0 = extractelement <2 x i8> %x, i32 0
-  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
-  %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
-  %1 = extractelement <2 x i8> %x, i32 1
-  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
-  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
-  ret <2 x i8> %vecinit2
-}
-
-define <2 x i8> @cltz_test2(<2 x i8> %x) #1 {
-; CHECK-LABEL: @cltz_test2(
-; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = extractelement <2 x i8> [[X:%.*]], i32 0
-; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <2 x i8> [[X]], i32 1
-; CHECK-NEXT:    [[CALL_I:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP0]], i1 false)
-; CHECK-NEXT:    [[CALL_I4:%.*]] = call i8 @llvm.ctlz.i8(i8 [[TMP1]], i1 false)
-; CHECK-NEXT:    [[VECINIT:%.*]] = insertelement <2 x i8> undef, i8 [[CALL_I]], i32 0
-; CHECK-NEXT:    [[VECINIT2:%.*]] = insertelement <2 x i8> [[VECINIT]], i8 [[CALL_I4]], i32 1
-; CHECK-NEXT:    ret <2 x i8> [[VECINIT2]]
-;
-entry:
-  %0 = extractelement <2 x i8> %x, i32 0
-  %1 = extractelement <2 x i8> %x, i32 1
-  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
-  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
-  %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
-  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
-  ret <2 x i8> %vecinit2
-}
-
-declare i8 @llvm.ctlz.i8(i8, i1) #3
-
-attributes #0 = { alwaysinline nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readnone }
diff --git a/llvm/test/Transforms/SLPVectorizer/NVPTX/vectorizable-intrinsic.ll b/llvm/test/Transforms/SLPVectorizer/NVPTX/vectorizable-intrinsic.ll
new file mode 100644
index 000000000000000..e6ad2adba1759e0
--- /dev/null
+++ b/llvm/test/Transforms/SLPVectorizer/NVPTX/vectorizable-intrinsic.ll
@@ -0,0 +1,42 @@
+; RUN: opt < %s -passes=slp-vectorizer -o - -S -slp-threshold=-1000 | FileCheck %s
+
+target datalayout = "e-p:32:32-i64:64-v16:16-v32:32-n16:32:64"
+target triple = "nvptx--nvidiacl"
+
+; Test that CTLZ can now be vectorized even though its second argument is a scalar
+
+define <2 x i8> @cltz_test(<2 x i8> %x) #0 {
+; CHECK-LABEL: @cltz_test(
+;      CHECK: [[VEC:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %{{.*}}, i1 false)
+; CHECK-NEXT: ret <2 x i8> [[VEC]]
+;
+entry:
+  %0 = extractelement <2 x i8> %x, i32 0
+  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+  %vecinit = insertelement <2 x i8> undef, i8 %call.i, i32 0
+  %1 = extractelement <2 x i8> %x, i32 1
+  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+  ret <2 x i8> %vecinit2
+}
+
+
+define <2 x i8> @cltz_test_poison(<2 x i8> %x) #0 {
+; CHECK-LABEL: @cltz_test_poison(
+;      CHECK: [[VEC:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %{{.*}}, i1 false)
+; CHECK-NEXT: ret <2 x i8> [[VEC]]
+;
+entry:
+  %0 = extractelement <2 x i8> %x, i32 0
+  %call.i = call i8 @llvm.ctlz.i8(i8 %0, i1 false)
+  %vecinit = insertelement <2 x i8> poison, i8 %call.i, i32 0
+  %1 = extractelement <2 x i8> %x, i32 1
+  %call.i4 = call i8 @llvm.ctlz.i8(i8 %1, i1 false)
+  %vecinit2 = insertelement <2 x i8> %vecinit, i8 %call.i4, i32 1
+  ret <2 x i8> %vecinit2
+}
+
+declare i8 @llvm.ctlz.i8(i8, i1) #3
+
+attributes #0 = { alwaysinline nounwind "less-precise-fpmad"="false" "frame-pointer"="all" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "stack-protector-buffer-size"="8" "unsafe-fp-math"="false" "use-soft-float"="false" }
+attributes #1 = { nounwind readnone }

>From de49dcee38ec976c08dc11d71246e1a1a4e60a49 Mon Sep 17 00:00:00 2001
From: Thomas Raoux <thomas.raoux at openai.com>
Date: Wed, 6 Sep 2023 11:33:10 -0700
Subject: [PATCH 2/2] Scalarize i16x2 ops when not natively supported instead
 of expanding
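
For illustration, scalarizing a v2i16 add is roughly equivalent to the
following hand-written IR (a sketch with hypothetical value names; the
actual lowering operates on SelectionDAG nodes, not on IR):

  %a0 = extractelement <2 x i16> %a, i32 0
  %b0 = extractelement <2 x i16> %b, i32 0
  %r0 = add i16 %a0, %b0
  %a1 = extractelement <2 x i16> %a, i32 1
  %b1 = extractelement <2 x i16> %b, i32 1
  %r1 = add i16 %a1, %b1
  %v0 = insertelement <2 x i16> poison, i16 %r0, i32 0
  %v1 = insertelement <2 x i16> %v0, i16 %r1, i32 1

Unlike generic expansion, this keeps each lane as a plain 16-bit scalar
op, which the existing scalar patterns match directly (e.g. two max.s16
instead of setp/selp pairs, as the updated tests below show).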

---
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp   | 58 ++++++++++++++-----
 llvm/test/CodeGen/NVPTX/i16x2-instructions.ll | 30 ++++------
 2 files changed, 55 insertions(+), 33 deletions(-)

diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index 3e36a72e1a53b52..170a6b58df69c18 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -621,21 +621,21 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
     setOperationAction(ISD::CTLZ, Ty, Legal);
   }
 
-  setI16x2OperationAction(ISD::ABS, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::SMIN, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::SMAX, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::UMIN, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::UMAX, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::ABS, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::SMIN, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::SMAX, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::UMIN, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::UMAX, MVT::v2i16, Legal, Custom);
   setI16x2OperationAction(ISD::CTPOP, MVT::v2i16, Legal, Expand);
   setI16x2OperationAction(ISD::CTLZ, MVT::v2i16, Legal, Expand);
 
-  setI16x2OperationAction(ISD::ADD, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::SUB, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::AND, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::MUL, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::SHL, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::SREM, MVT::v2i16, Legal, Expand);
-  setI16x2OperationAction(ISD::UREM, MVT::v2i16, Legal, Expand);
+  setI16x2OperationAction(ISD::ADD, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::SUB, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::AND, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::MUL, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::SHL, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::SREM, MVT::v2i16, Legal, Custom);
+  setI16x2OperationAction(ISD::UREM, MVT::v2i16, Legal, Custom);
 
   setOperationAction(ISD::ADDC, MVT::i32, Legal);
   setOperationAction(ISD::ADDE, MVT::i32, Legal);
@@ -2418,7 +2418,26 @@ SDValue NVPTXTargetLowering::LowerFROUND64(SDValue Op,
   return DAG.getNode(ISD::SELECT, SL, VT, IsLarge, A, RoundedA);
 }
 
-
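+// Scalarize a v2i16 operation: extract each lane of every operand, apply
+// the same opcode at the scalar element type, and reassemble the results
+// with a BUILD_VECTOR. Any other value type is returned unchanged.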
+static SDValue LowerVectorArith(SDValue Op, SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  if (Op.getValueType() != MVT::v2i16)
+    return Op;
+  EVT EltVT = Op.getValueType().getVectorElementType();
+  SmallVector<SDValue> VecElements;
+  for (int I = 0, E = Op.getValueType().getVectorNumElements(); I < E; I++) {
+    SmallVector<SDValue> ScalarArgs;
+    for (int J = 0, NumOp = Op.getNumOperands(); J < NumOp; J++) {
+      SDValue Ext =
+          DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Op->getOperand(J),
+                      DAG.getIntPtrConstant(I, DL));
+      ScalarArgs.push_back(Ext);
+    }
+    VecElements.push_back(DAG.getNode(Op.getOpcode(), DL, EltVT, ScalarArgs));
+  }
+  SDValue V =
+      DAG.getNode(ISD::BUILD_VECTOR, DL, Op.getValueType(), VecElements);
+  return V;
+}
 
 SDValue
 NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
@@ -2456,6 +2475,19 @@ NVPTXTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
     return LowerVAARG(Op, DAG);
   case ISD::VASTART:
     return LowerVASTART(Op, DAG);
+  case ISD::ABS:
+  case ISD::SMIN:
+  case ISD::SMAX:
+  case ISD::UMIN:
+  case ISD::UMAX:
+  case ISD::ADD:
+  case ISD::SUB:
+  case ISD::AND:
+  case ISD::MUL:
+  case ISD::SHL:
+  case ISD::SREM:
+  case ISD::UREM:
+    return LowerVectorArith(Op, DAG);
   default:
     llvm_unreachable("Custom lowering not defined for operation");
   }
diff --git a/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll
index 265e2b99af2156f..8a8d7d913780cb2 100644
--- a/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/i16x2-instructions.ll
@@ -9,13 +9,11 @@
 ; RUN: %}
 ; ## No support for i16x2 instructions
 ; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN:          -O0 -disable-post-ra -frame-pointer=all --nvptx-no-f16-math \
-; RUN:           -verify-machineinstrs \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
 ; RUN: | FileCheck -allow-deprecated-dag-overlap -check-prefixes CHECK,CHECK-NOI16x2 %s
 ; RUN: %if ptxas %{                                                           \
 ; RUN:   llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_53 -asm-verbose=false \
-; RUN:          -O0 -disable-post-ra -frame-pointer=all --nvptx-no-f16-math   \
-; RUN:           -verify-machineinstrs                                        \
+; RUN:          -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
 ; RUN:   | %ptxas-verify -arch=sm_53                                          \
 ; RUN: %}
 
@@ -148,10 +146,8 @@ define <2 x i16> @test_sub(<2 x i16> %a, <2 x i16> %b) #0 {
 ;
 ;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
 ;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
-;	CHECK-NOI16x2-DAG:	setp.gt.s16 	[[P0:%p[0-9]+]], [[RS0]], [[RS2]];
-;	CHECK-NOI16x2-DAG:	setp.gt.s16 	[[P1:%p[0-9]+]], [[RS1]], [[RS3]];
-;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]], [[P0]];
-;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]], [[P1]];
+;	CHECK-NOI16x2-DAG: max.s16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG: max.s16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
 ;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
 ;
 ; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
@@ -170,10 +166,8 @@ define <2 x i16> @test_smax(<2 x i16> %a, <2 x i16> %b) #0 {
 ;
 ;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
 ;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
-;	CHECK-NOI16x2-DAG:	setp.gt.u16 	[[P0:%p[0-9]+]], [[RS0]], [[RS2]];
-;	CHECK-NOI16x2-DAG:	setp.gt.u16 	[[P1:%p[0-9]+]], [[RS1]], [[RS3]];
-;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]], [[P0]];
-;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]], [[P1]];
+;	CHECK-NOI16x2-DAG: max.u16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG: max.u16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
 ;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
 ;
 ; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
@@ -192,10 +186,8 @@ define <2 x i16> @test_umax(<2 x i16> %a, <2 x i16> %b) #0 {
 ;
 ;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
 ;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
-;	CHECK-NOI16x2-DAG:	setp.le.s16 	[[P0:%p[0-9]+]], [[RS0]], [[RS2]];
-;	CHECK-NOI16x2-DAG:	setp.le.s16 	[[P1:%p[0-9]+]], [[RS1]], [[RS3]];
-;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]], [[P0]];
-;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]], [[P1]];
+;	CHECK-NOI16x2-DAG: min.s16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG: min.s16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
 ;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
 ;
 ; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];
@@ -214,10 +206,8 @@ define <2 x i16> @test_smin(<2 x i16> %a, <2 x i16> %b) #0 {
 ;
 ;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS0:%rs[0-9]+]], [[RS1:%rs[0-9]+]]}, [[A]];
 ;	CHECK-NOI16x2-DAG: mov.b32 	{[[RS2:%rs[0-9]+]], [[RS3:%rs[0-9]+]]}, [[B]];
-;	CHECK-NOI16x2-DAG:	setp.le.u16 	[[P0:%p[0-9]+]], [[RS0]], [[RS2]];
-;	CHECK-NOI16x2-DAG:	setp.le.u16 	[[P1:%p[0-9]+]], [[RS1]], [[RS3]];
-;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]], [[P0]];
-;	CHECK-NOI16x2-DAG:	selp.b16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]], [[P1]];
+;	CHECK-NOI16x2-DAG: min.u16 	[[RS4:%rs[0-9]+]], [[RS0]], [[RS2]];
+;	CHECK-NOI16x2-DAG: min.u16 	[[RS5:%rs[0-9]+]], [[RS1]], [[RS3]];
 ;	CHECK-NOI16x2-DAG: mov.b32 	[[R:%r[0-9]+]], {[[RS4]], [[RS5]]};
 ;
 ; CHECK-NEXT: st.param.b32    [func_retval0+0], [[R]];


