[llvm] [NVPTX] legalize v2i32 to improve compatibility with v2f32 (PR #153478)

Princeton Ferro via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 16 15:51:24 PDT 2025


https://github.com/Prince781 updated https://github.com/llvm/llvm-project/pull/153478

>From 7a6fee1114e4cb01005ff4499e95e8d5f2ea6ab2 Mon Sep 17 00:00:00 2001
From: Princeton Ferro <pferro at nvidia.com>
Date: Tue, 12 Aug 2025 18:01:46 -0700
Subject: [PATCH] [NVPTX] legalize v2i32 to improve codegen of v2f32 ops

v2f32 is legal but v2i32 is not, which causes some sequences of
operations, such as bitcast (build_vector), to be lowered inefficiently.
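
A reduced, hypothetical illustration of the affected pattern (the
function name and values below are illustrative, not taken from the
patch). Before this change, the <2 x i32> build_vector was kept in two
32-bit registers and re-packed across the bitcast instead of staying in
a single 64-bit register:

    define <2 x float> @pack(i32 %a, i32 %b) {
      %v0 = insertelement <2 x i32> poison, i32 %a, i32 0
      %v1 = insertelement <2 x i32> %v0, i32 %b, i32 1  ; build_vector
      %f = bitcast <2 x i32> %v1 to <2 x float>         ; bitcast
      ret <2 x float> %f
    }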
---
 llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp   |   1 +
 llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp   |  54 +-
 llvm/lib/Target/NVPTX/NVPTXInstrInfo.td       |   8 +-
 llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td    |   3 +-
 llvm/lib/Target/NVPTX/NVPTXUtilities.h        |   4 +-
 .../test/CodeGen/NVPTX/f32x2-convert-i32x2.ll | 119 +++++
 llvm/test/CodeGen/NVPTX/f32x2-instructions.ll | 170 ++++--
 llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll  |  30 +-
 .../load-store-256-addressing-invariant.ll    |  28 +-
 .../NVPTX/load-store-256-addressing.ll        |  28 +-
 .../CodeGen/NVPTX/load-store-vectors-256.ll   | 456 ++++++++--------
 .../NVPTX/load-with-non-coherent-cache.ll     |  12 +-
 .../CodeGen/NVPTX/reduction-intrinsics.ll     | 486 ++++++++++++------
 13 files changed, 895 insertions(+), 504 deletions(-)
 create mode 100644 llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll
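
As a sketch of the intended effect on sm_100+ (assumed behavior under
this patch, not one of its tests): <2 x i32> now lives in a single .b64
register just like <2 x float>, so a bitcast between the two is a no-op
on the same register, while integer arithmetic on <2 x i32> is still
expanded to scalar ops:

    define void @roundtrip(ptr %p, ptr %q) {
      %v = load <2 x i32>, ptr %p               ; ld.b64
      %f = bitcast <2 x i32> %v to <2 x float>  ; no repacking needed
      %r = fadd <2 x float> %f, %f              ; add.rn.f32x2
      store <2 x float> %r, ptr %q              ; st.b64
      ret void
    }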

diff --git a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
index c70f48af33cf2..efffca279499d 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelDAGToDAG.cpp
@@ -1018,6 +1018,7 @@ pickOpcodeForVT(MVT::SimpleValueType VT, std::optional<unsigned> Opcode_i16,
   case MVT::f32:
     return Opcode_i32;
   case MVT::v2f32:
+  case MVT::v2i32:
   case MVT::i64:
   case MVT::f64:
     return Opcode_i64;
diff --git a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
index d3fb657851fe2..7d7579b491c37 100644
--- a/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
+++ b/llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp
@@ -226,21 +226,20 @@ getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI,
   switch (VectorVT.SimpleTy) {
   default:
     return std::nullopt;
+
   case MVT::v4i64:
   case MVT::v4f64:
-  case MVT::v8i32:
-    // This is a "native" vector type iff the address space is global
-    // and the target supports 256-bit loads/stores
+    // This is a "native" vector type iff the address space is global and the
+    // target supports 256-bit loads/stores
     if (!CanLowerTo256Bit)
       return std::nullopt;
     LLVM_FALLTHROUGH;
   case MVT::v2i8:
-  case MVT::v2i32:
   case MVT::v2i64:
   case MVT::v2f64:
-  case MVT::v4i32:
     // This is a "native" vector type
     return std::pair(NumElts, EltVT);
+
   case MVT::v16f16:  // <8 x f16x2>
   case MVT::v16bf16: // <8 x bf16x2>
   case MVT::v16i16:  // <8 x i16x2>
@@ -264,12 +263,18 @@ getVectorLoweringShape(EVT VectorEVT, const NVPTXSubtarget &STI,
   case MVT::v16i8:  // <4 x i8x4>
     PackRegSize = 32;
     break;
-  case MVT::v8f32: // <4 x f32x2>
+
+  case MVT::v8f32:  // <4 x f32x2>
+  case MVT::v8i32:  // <4 x i32x2>
+    // This is a "native" vector type iff the address space is global and the
+    // target supports 256-bit loads/stores
     if (!CanLowerTo256Bit)
       return std::nullopt;
     LLVM_FALLTHROUGH;
-  case MVT::v2f32: // <1 x f32x2>
-  case MVT::v4f32: // <2 x f32x2>
+  case MVT::v2f32:  // <1 x f32x2>
+  case MVT::v4f32:  // <2 x f32x2>
+  case MVT::v2i32:  // <1 x i32x2>
+  case MVT::v4i32:  // <2 x i32x2>
     if (!STI.hasF32x2Instructions())
       return std::pair(NumElts, EltVT);
     PackRegSize = 64;
@@ -590,8 +595,10 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   addRegisterClass(MVT::bf16, &NVPTX::B16RegClass);
   addRegisterClass(MVT::v2bf16, &NVPTX::B32RegClass);
 
-  if (STI.hasF32x2Instructions())
+  if (STI.hasF32x2Instructions()) {
     addRegisterClass(MVT::v2f32, &NVPTX::B64RegClass);
+    addRegisterClass(MVT::v2i32, &NVPTX::B64RegClass);
+  }
 
   // Conversion to/from FP16/FP16x2 is always legal.
   setOperationAction(ISD::BUILD_VECTOR, MVT::v2f16, Custom);
@@ -628,12 +635,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i8, Custom);
   setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4i8, Custom);
 
-  // No support for these operations with v2f32.
-  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f32, Expand);
-  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f32, Expand);
+  // No support for these operations with v2f32/v2i32.
+  setOperationAction(ISD::INSERT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32}, Expand);
+  setOperationAction(ISD::VECTOR_SHUFFLE, {MVT::v2f32, MVT::v2i32}, Expand);
   // Need custom lowering in case the index is dynamic.
   if (STI.hasF32x2Instructions())
-    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f32, Custom);
+    setOperationAction(ISD::EXTRACT_VECTOR_ELT, {MVT::v2f32, MVT::v2i32},
+                       Custom);
 
   // Custom conversions to/from v2i8.
   setOperationAction(ISD::BITCAST, MVT::v2i8, Custom);
@@ -661,14 +669,13 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   // Operations not directly supported by NVPTX.
   for (MVT VT : {MVT::bf16, MVT::f16, MVT::v2bf16, MVT::v2f16, MVT::f32,
                  MVT::v2f32, MVT::f64, MVT::i1, MVT::i8, MVT::i16, MVT::v2i16,
-                 MVT::v4i8, MVT::i32, MVT::i64}) {
+                 MVT::v4i8, MVT::i32, MVT::v2i32, MVT::i64}) {
     setOperationAction(ISD::SELECT_CC, VT, Expand);
     setOperationAction(ISD::BR_CC, VT, Expand);
   }
 
-  // Not directly supported. TLI would attempt to expand operations like
-  // FMINIMUM(v2f32) using invalid SETCC and VSELECT nodes.
-  setOperationAction(ISD::VSELECT, MVT::v2f32, Expand);
+  // We don't want ops like FMINIMUM or UMAX to be lowered to SETCC+VSELECT.
+  setOperationAction(ISD::VSELECT, {MVT::v2f32, MVT::v2i32}, Expand);
 
   // Some SIGN_EXTEND_INREG can be done using cvt instruction.
   // For others we will expand to a SHL/SRA pair.
@@ -815,7 +822,14 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   setOperationAction({ISD::SDIV, ISD::UDIV, ISD::SRA, ISD::SRL, ISD::MULHS,
                       ISD::MULHU, ISD::FP_TO_SINT, ISD::FP_TO_UINT,
                       ISD::SINT_TO_FP, ISD::UINT_TO_FP, ISD::SETCC},
-                     MVT::v2i16, Expand);
+                     {MVT::v2i16, MVT::v2i32}, Expand);
+
+  // v2i32 is not supported for any arithmetic operations.
+  setOperationAction({ISD::ABS, ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX,
+                      ISD::CTPOP, ISD::CTLZ, ISD::ADD, ISD::SUB, ISD::MUL,
+                      ISD::SHL, ISD::SRA, ISD::SRL, ISD::OR, ISD::AND, ISD::XOR,
+                      ISD::SREM, ISD::UREM},
+                     MVT::v2i32, Expand);
 
   setOperationAction(ISD::ADDC, MVT::i32, Legal);
   setOperationAction(ISD::ADDE, MVT::i32, Legal);
@@ -829,7 +843,7 @@ NVPTXTargetLowering::NVPTXTargetLowering(const NVPTXTargetMachine &TM,
   }
 
   setOperationAction(ISD::CTTZ, MVT::i16, Expand);
-  setOperationAction(ISD::CTTZ, MVT::v2i16, Expand);
+  setOperationAction(ISD::CTTZ, {MVT::v2i16, MVT::v2i32}, Expand);
   setOperationAction(ISD::CTTZ, MVT::i32, Expand);
   setOperationAction(ISD::CTTZ, MVT::i64, Expand);
 
@@ -5673,7 +5687,7 @@ static SDValue PerformEXTRACTCombine(SDNode *N,
       IsPTXVectorType(VectorVT.getSimpleVT()))
     return SDValue(); // Native vector loads already combine nicely w/
                       // extract_vector_elt.
-  // Don't mess with singletons or packed types (v2f32, v2*16, v4i8 and v8i8),
+  // Don't mess with singletons or packed types (v2*32, v2*16, v4i8 and v8i8),
   // we already handle them OK.
   if (VectorVT.getVectorNumElements() == 1 ||
       NVPTX::isPackedVectorTy(VectorVT) || VectorVT == MVT::v8i8)
diff --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index 4e38e026e6bda..d813afc9577d9 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -754,8 +754,10 @@ def : Pat<(vt (select i1:$p, vt:$a, vt:$b)),
           (SELP_b32rr $a, $b, $p)>;
 }
 
-def : Pat<(v2f32 (select i1:$p, v2f32:$a, v2f32:$b)),
+foreach vt = [v2f32, v2i32] in {
+def : Pat<(vt (select i1:$p, vt:$a, vt:$b)),
           (SELP_b64rr $a, $b, $p)>;
+}
 
 //-----------------------------------
 // Test Instructions
@@ -2092,8 +2094,8 @@ foreach vt = [v2f16, v2bf16, v2i16] in {
             (V2I16toI32 $a, $b)>;
 }
 
-// Same thing for the 64-bit type v2f32.
-foreach vt = [v2f32] in {
+// Handle extracting one element from the pair (64-bit types).
+foreach vt = [v2f32, v2i32] in {
   def : Pat<(extractelt vt:$src, 0), (I64toI32L_Sink $src)>, Requires<[hasPTX<71>]>;
   def : Pat<(extractelt vt:$src, 1), (I64toI32H_Sink $src)>, Requires<[hasPTX<71>]>;
 
diff --git a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
index 2e81ab122d1df..913487b64617a 100644
--- a/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -54,7 +54,8 @@ def B16 : NVPTXRegClass<[i16, f16, bf16], 16, (add (sequence "RS%u", 0, 4))>;
 def B32 : NVPTXRegClass<[i32, v2f16, v2bf16, v2i16, v4i8, f32], 32,
                               (add (sequence "R%u", 0, 4),
                               VRFrame32, VRFrameLocal32)>;
-def B64 : NVPTXRegClass<[i64, v2f32, f64], 64, (add (sequence "RL%u", 0, 4),
+def B64 : NVPTXRegClass<[i64, v2i32, v2f32, f64], 64,
+                        (add (sequence "RL%u", 0, 4),
                          VRFrame64, VRFrameLocal64)>;
 // 128-bit regs are not defined as general regs in NVPTX. They are used for inlineASM only.
 def B128 : NVPTXRegClass<[i128], 128, (add (sequence "RQ%u", 0, 4))>;
diff --git a/llvm/lib/Target/NVPTX/NVPTXUtilities.h b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
index a070789f85e0b..4b5cb30fd3036 100644
--- a/llvm/lib/Target/NVPTX/NVPTXUtilities.h
+++ b/llvm/lib/Target/NVPTX/NVPTXUtilities.h
@@ -99,8 +99,8 @@ namespace NVPTX {
 // register. NOTE: This must be kept in sync with the register classes
 // defined in NVPTXRegisterInfo.td.
 inline auto packed_types() {
-  static const auto PackedTypes = {MVT::v4i8, MVT::v2f16, MVT::v2bf16,
-                                   MVT::v2i16, MVT::v2f32};
+  static const auto PackedTypes = {MVT::v4i8,  MVT::v2f16, MVT::v2bf16,
+                                   MVT::v2i16, MVT::v2f32, MVT::v2i32};
   return PackedTypes;
 }
 
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll
new file mode 100644
index 0000000000000..18fb87935d17d
--- /dev/null
+++ b/llvm/test/CodeGen/NVPTX/f32x2-convert-i32x2.ll
@@ -0,0 +1,119 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mcpu=sm_90a -O0 -disable-post-ra -frame-pointer=all          \
+; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-SM90A %s
+; RUN: %if ptxas-12.7 %{                                                      \
+; RUN:  llc < %s -mcpu=sm_90a -O0 -disable-post-ra -frame-pointer=all         \
+; RUN:  -verify-machineinstrs | %ptxas-verify -arch=sm_90a                    \
+; RUN: %}
+; RUN: llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all          \
+; RUN: -verify-machineinstrs | FileCheck --check-prefixes=CHECK,CHECK-SM100 %s
+; RUN: %if ptxas-12.7 %{                                                      \
+; RUN:  llc < %s -mcpu=sm_100 -O0 -disable-post-ra -frame-pointer=all         \
+; RUN:  -verify-machineinstrs | %ptxas-verify -arch=sm_100                    \
+; RUN: %}
+
+; Test that v2i32 -> v2f32 conversions don't emit bitwise operations on i64.
+
+target triple = "nvptx64-nvidia-cuda"
+
+declare <2 x i32> @return_i32x2(i32 %0)
+
+; Test with v2i32.
+define ptx_kernel void @store_i32x2(i32 %0, ptr %p) {
+; CHECK-SM90A-LABEL: store_i32x2(
+; CHECK-SM90A:       {
+; CHECK-SM90A-NEXT:    .reg .b32 %r<6>;
+; CHECK-SM90A-NEXT:    .reg .b64 %rd<2>;
+; CHECK-SM90A-EMPTY:
+; CHECK-SM90A-NEXT:  // %bb.0:
+; CHECK-SM90A-NEXT:    ld.param.b64 %rd1, [store_i32x2_param_1];
+; CHECK-SM90A-NEXT:    ld.param.b32 %r1, [store_i32x2_param_0];
+; CHECK-SM90A-NEXT:    { // callseq 0, 0
+; CHECK-SM90A-NEXT:    .param .b32 param0;
+; CHECK-SM90A-NEXT:    .param .align 8 .b8 retval0[8];
+; CHECK-SM90A-NEXT:    st.param.b32 [param0], %r1;
+; CHECK-SM90A-NEXT:    call.uni (retval0), return_i32x2, (param0);
+; CHECK-SM90A-NEXT:    ld.param.v2.b32 {%r2, %r3}, [retval0];
+; CHECK-SM90A-NEXT:    } // callseq 0
+; CHECK-SM90A-NEXT:    add.rn.f32 %r4, %r3, %r3;
+; CHECK-SM90A-NEXT:    add.rn.f32 %r5, %r2, %r2;
+; CHECK-SM90A-NEXT:    st.v2.b32 [%rd1], {%r5, %r4};
+; CHECK-SM90A-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: store_i32x2(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<2>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<4>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.b64 %rd1, [store_i32x2_param_1];
+; CHECK-SM100-NEXT:    ld.param.b32 %r1, [store_i32x2_param_0];
+; CHECK-SM100-NEXT:    { // callseq 0, 0
+; CHECK-SM100-NEXT:    .param .b32 param0;
+; CHECK-SM100-NEXT:    .param .align 8 .b8 retval0[8];
+; CHECK-SM100-NEXT:    st.param.b32 [param0], %r1;
+; CHECK-SM100-NEXT:    call.uni (retval0), return_i32x2, (param0);
+; CHECK-SM100-NEXT:    ld.param.b64 %rd2, [retval0];
+; CHECK-SM100-NEXT:    } // callseq 0
+; CHECK-SM100-NEXT:    add.rn.f32x2 %rd3, %rd2, %rd2;
+; CHECK-SM100-NEXT:    st.b64 [%rd1], %rd3;
+; CHECK-SM100-NEXT:    ret;
+  %v = call <2 x i32> @return_i32x2(i32 %0)
+  %v.f32x2 = bitcast <2 x i32> %v to <2 x float>
+  %res = fadd <2 x float> %v.f32x2, %v.f32x2
+  store <2 x float> %res, ptr %p, align 8
+  ret void
+}
+
+; Test with inline ASM returning { <1 x float>, <1 x float> }, which decays to
+; v2i32.
+define ptx_kernel void @inlineasm(ptr %p) {
+; CHECK-SM90A-LABEL: inlineasm(
+; CHECK-SM90A:       {
+; CHECK-SM90A-NEXT:    .reg .b32 %r<7>;
+; CHECK-SM90A-NEXT:    .reg .b64 %rd<2>;
+; CHECK-SM90A-EMPTY:
+; CHECK-SM90A-NEXT:  // %bb.0:
+; CHECK-SM90A-NEXT:    ld.param.b64 %rd1, [inlineasm_param_0];
+; CHECK-SM90A-NEXT:    mov.b32 %r3, 0;
+; CHECK-SM90A-NEXT:    mov.b32 %r4, %r3;
+; CHECK-SM90A-NEXT:    mov.b32 %r2, %r4;
+; CHECK-SM90A-NEXT:    mov.b32 %r1, %r3;
+; CHECK-SM90A-NEXT:    // begin inline asm
+; CHECK-SM90A-NEXT:    // nop
+; CHECK-SM90A-NEXT:    // end inline asm
+; CHECK-SM90A-NEXT:    mul.rn.f32 %r5, %r2, 0f00000000;
+; CHECK-SM90A-NEXT:    mul.rn.f32 %r6, %r1, 0f00000000;
+; CHECK-SM90A-NEXT:    st.v2.b32 [%rd1], {%r6, %r5};
+; CHECK-SM90A-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: inlineasm(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<6>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.b64 %rd1, [inlineasm_param_0];
+; CHECK-SM100-NEXT:    mov.b32 %r3, 0;
+; CHECK-SM100-NEXT:    mov.b32 %r4, %r3;
+; CHECK-SM100-NEXT:    mov.b32 %r2, %r4;
+; CHECK-SM100-NEXT:    mov.b32 %r1, %r3;
+; CHECK-SM100-NEXT:    // begin inline asm
+; CHECK-SM100-NEXT:    // nop
+; CHECK-SM100-NEXT:    // end inline asm
+; CHECK-SM100-NEXT:    mov.b64 %rd2, {%r1, %r2};
+; CHECK-SM100-NEXT:    mov.b32 %r5, 0f00000000;
+; CHECK-SM100-NEXT:    mov.b64 %rd3, {%r5, %r5};
+; CHECK-SM100-NEXT:    mul.rn.f32x2 %rd4, %rd2, %rd3;
+; CHECK-SM100-NEXT:    st.b64 [%rd1], %rd4;
+; CHECK-SM100-NEXT:    ret;
+  %r = call { <1 x float>, <1 x float> } asm sideeffect "// nop", "=f,=f,0,1"(<1 x float> zeroinitializer, <1 x float> zeroinitializer)
+  %i0 = extractvalue { <1 x float>, <1 x float> } %r, 0
+  %i1 = extractvalue { <1 x float>, <1 x float> } %r, 1
+  %i4 = shufflevector <1 x float> %i0, <1 x float> %i1, <2 x i32> <i32 0, i32 1>
+  %mul = fmul <2 x float> %i4, zeroinitializer
+  store <2 x float> %mul, ptr %p, align 8
+  ret void
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; CHECK: {{.*}}
diff --git a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
index 217bb483682ff..a90cfff51e2c6 100644
--- a/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f32x2-instructions.ll
@@ -1938,16 +1938,29 @@ define <2 x i64> @test_fptoui_2xi64(<2 x float> %a) #0 {
 }
 
 define <2 x float> @test_uitofp_2xi32(<2 x i32> %a) #0 {
-; CHECK-LABEL: test_uitofp_2xi32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
-; CHECK-NEXT:    cvt.rn.f32.u32 %r3, %r2;
-; CHECK-NEXT:    cvt.rn.f32.u32 %r4, %r1;
-; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
-; CHECK-NEXT:    ret;
+; CHECK-NOF32X2-LABEL: test_uitofp_2xi32(
+; CHECK-NOF32X2:       {
+; CHECK-NOF32X2-NEXT:    .reg .b32 %r<5>;
+; CHECK-NOF32X2-EMPTY:
+; CHECK-NOF32X2-NEXT:  // %bb.0:
+; CHECK-NOF32X2-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_param_0];
+; CHECK-NOF32X2-NEXT:    cvt.rn.f32.u32 %r3, %r2;
+; CHECK-NOF32X2-NEXT:    cvt.rn.f32.u32 %r4, %r1;
+; CHECK-NOF32X2-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOF32X2-NEXT:    ret;
+;
+; CHECK-F32X2-LABEL: test_uitofp_2xi32(
+; CHECK-F32X2:       {
+; CHECK-F32X2-NEXT:    .reg .b32 %r<5>;
+; CHECK-F32X2-NEXT:    .reg .b64 %rd<2>;
+; CHECK-F32X2-EMPTY:
+; CHECK-F32X2-NEXT:  // %bb.0:
+; CHECK-F32X2-NEXT:    ld.param.b64 %rd1, [test_uitofp_2xi32_param_0];
+; CHECK-F32X2-NEXT:    mov.b64 {%r1, %r2}, %rd1;
+; CHECK-F32X2-NEXT:    cvt.rn.f32.u32 %r3, %r2;
+; CHECK-F32X2-NEXT:    cvt.rn.f32.u32 %r4, %r1;
+; CHECK-F32X2-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-F32X2-NEXT:    ret;
   %r = uitofp <2 x i32> %a to <2 x float>
   ret <2 x float> %r
 }
@@ -1969,16 +1982,29 @@ define <2 x float> @test_uitofp_2xi64(<2 x i64> %a) #0 {
 }
 
 define <2 x float> @test_sitofp_2xi32(<2 x i32> %a) #0 {
-; CHECK-LABEL: test_sitofp_2xi32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
-; CHECK-NEXT:    cvt.rn.f32.s32 %r3, %r2;
-; CHECK-NEXT:    cvt.rn.f32.s32 %r4, %r1;
-; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
-; CHECK-NEXT:    ret;
+; CHECK-NOF32X2-LABEL: test_sitofp_2xi32(
+; CHECK-NOF32X2:       {
+; CHECK-NOF32X2-NEXT:    .reg .b32 %r<5>;
+; CHECK-NOF32X2-EMPTY:
+; CHECK-NOF32X2-NEXT:  // %bb.0:
+; CHECK-NOF32X2-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_param_0];
+; CHECK-NOF32X2-NEXT:    cvt.rn.f32.s32 %r3, %r2;
+; CHECK-NOF32X2-NEXT:    cvt.rn.f32.s32 %r4, %r1;
+; CHECK-NOF32X2-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOF32X2-NEXT:    ret;
+;
+; CHECK-F32X2-LABEL: test_sitofp_2xi32(
+; CHECK-F32X2:       {
+; CHECK-F32X2-NEXT:    .reg .b32 %r<5>;
+; CHECK-F32X2-NEXT:    .reg .b64 %rd<2>;
+; CHECK-F32X2-EMPTY:
+; CHECK-F32X2-NEXT:  // %bb.0:
+; CHECK-F32X2-NEXT:    ld.param.b64 %rd1, [test_sitofp_2xi32_param_0];
+; CHECK-F32X2-NEXT:    mov.b64 {%r1, %r2}, %rd1;
+; CHECK-F32X2-NEXT:    cvt.rn.f32.s32 %r3, %r2;
+; CHECK-F32X2-NEXT:    cvt.rn.f32.s32 %r4, %r1;
+; CHECK-F32X2-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-F32X2-NEXT:    ret;
   %r = sitofp <2 x i32> %a to <2 x float>
   ret <2 x float> %r
 }
@@ -2017,16 +2043,17 @@ define <2 x float> @test_uitofp_2xi32_fadd(<2 x i32> %a, <2 x float> %b) #0 {
 ; CHECK-F32X2-LABEL: test_uitofp_2xi32_fadd(
 ; CHECK-F32X2:       {
 ; CHECK-F32X2-NEXT:    .reg .b32 %r<5>;
-; CHECK-F32X2-NEXT:    .reg .b64 %rd<4>;
+; CHECK-F32X2-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-F32X2-EMPTY:
 ; CHECK-F32X2-NEXT:  // %bb.0:
-; CHECK-F32X2-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_fadd_param_0];
-; CHECK-F32X2-NEXT:    ld.param.b64 %rd1, [test_uitofp_2xi32_fadd_param_1];
+; CHECK-F32X2-NEXT:    ld.param.b64 %rd2, [test_uitofp_2xi32_fadd_param_1];
+; CHECK-F32X2-NEXT:    ld.param.b64 %rd1, [test_uitofp_2xi32_fadd_param_0];
+; CHECK-F32X2-NEXT:    mov.b64 {%r1, %r2}, %rd1;
 ; CHECK-F32X2-NEXT:    cvt.rn.f32.u32 %r3, %r2;
 ; CHECK-F32X2-NEXT:    cvt.rn.f32.u32 %r4, %r1;
-; CHECK-F32X2-NEXT:    mov.b64 %rd2, {%r4, %r3};
-; CHECK-F32X2-NEXT:    add.rn.f32x2 %rd3, %rd1, %rd2;
-; CHECK-F32X2-NEXT:    st.param.b64 [func_retval0], %rd3;
+; CHECK-F32X2-NEXT:    mov.b64 %rd3, {%r4, %r3};
+; CHECK-F32X2-NEXT:    add.rn.f32x2 %rd4, %rd2, %rd3;
+; CHECK-F32X2-NEXT:    st.param.b64 [func_retval0], %rd4;
 ; CHECK-F32X2-NEXT:    ret;
   %c = uitofp <2 x i32> %a to <2 x float>
   %r = fadd <2 x float> %b, %c
@@ -2114,14 +2141,23 @@ define <2 x i32> @test_bitcast_2xfloat_to_2xi32(<2 x float> %a) #0 {
 }
 
 define <2 x float> @test_bitcast_2xi32_to_2xfloat(<2 x i32> %a) #0 {
-; CHECK-LABEL: test_bitcast_2xi32_to_2xfloat(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<3>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0];
-; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r1, %r2};
-; CHECK-NEXT:    ret;
+; CHECK-NOF32X2-LABEL: test_bitcast_2xi32_to_2xfloat(
+; CHECK-NOF32X2:       {
+; CHECK-NOF32X2-NEXT:    .reg .b32 %r<3>;
+; CHECK-NOF32X2-EMPTY:
+; CHECK-NOF32X2-NEXT:  // %bb.0:
+; CHECK-NOF32X2-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_bitcast_2xi32_to_2xfloat_param_0];
+; CHECK-NOF32X2-NEXT:    st.param.v2.b32 [func_retval0], {%r1, %r2};
+; CHECK-NOF32X2-NEXT:    ret;
+;
+; CHECK-F32X2-LABEL: test_bitcast_2xi32_to_2xfloat(
+; CHECK-F32X2:       {
+; CHECK-F32X2-NEXT:    .reg .b64 %rd<2>;
+; CHECK-F32X2-EMPTY:
+; CHECK-F32X2-NEXT:  // %bb.0:
+; CHECK-F32X2-NEXT:    ld.param.b64 %rd1, [test_bitcast_2xi32_to_2xfloat_param_0];
+; CHECK-F32X2-NEXT:    st.param.b64 [func_retval0], %rd1;
+; CHECK-F32X2-NEXT:    ret;
   %r = bitcast <2 x i32> %a to <2 x float>
   ret <2 x float> %r
 }
@@ -2851,31 +2887,57 @@ define <2 x float> @test_insertelement(<2 x float> %a, float %x) #0 {
 }
 
 define <2 x float> @test_sitofp_2xi32_to_2xfloat(<2 x i32> %a) #0 {
-; CHECK-LABEL: test_sitofp_2xi32_to_2xfloat(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_to_2xfloat_param_0];
-; CHECK-NEXT:    cvt.rn.f32.s32 %r3, %r2;
-; CHECK-NEXT:    cvt.rn.f32.s32 %r4, %r1;
-; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
-; CHECK-NEXT:    ret;
+; CHECK-NOF32X2-LABEL: test_sitofp_2xi32_to_2xfloat(
+; CHECK-NOF32X2:       {
+; CHECK-NOF32X2-NEXT:    .reg .b32 %r<5>;
+; CHECK-NOF32X2-EMPTY:
+; CHECK-NOF32X2-NEXT:  // %bb.0:
+; CHECK-NOF32X2-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_sitofp_2xi32_to_2xfloat_param_0];
+; CHECK-NOF32X2-NEXT:    cvt.rn.f32.s32 %r3, %r2;
+; CHECK-NOF32X2-NEXT:    cvt.rn.f32.s32 %r4, %r1;
+; CHECK-NOF32X2-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOF32X2-NEXT:    ret;
+;
+; CHECK-F32X2-LABEL: test_sitofp_2xi32_to_2xfloat(
+; CHECK-F32X2:       {
+; CHECK-F32X2-NEXT:    .reg .b32 %r<5>;
+; CHECK-F32X2-NEXT:    .reg .b64 %rd<2>;
+; CHECK-F32X2-EMPTY:
+; CHECK-F32X2-NEXT:  // %bb.0:
+; CHECK-F32X2-NEXT:    ld.param.b64 %rd1, [test_sitofp_2xi32_to_2xfloat_param_0];
+; CHECK-F32X2-NEXT:    mov.b64 {%r1, %r2}, %rd1;
+; CHECK-F32X2-NEXT:    cvt.rn.f32.s32 %r3, %r2;
+; CHECK-F32X2-NEXT:    cvt.rn.f32.s32 %r4, %r1;
+; CHECK-F32X2-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-F32X2-NEXT:    ret;
   %r = sitofp <2 x i32> %a to <2 x float>
   ret <2 x float> %r
 }
 
 define <2 x float> @test_uitofp_2xi32_to_2xfloat(<2 x i32> %a) #0 {
-; CHECK-LABEL: test_uitofp_2xi32_to_2xfloat(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<5>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_to_2xfloat_param_0];
-; CHECK-NEXT:    cvt.rn.f32.u32 %r3, %r2;
-; CHECK-NEXT:    cvt.rn.f32.u32 %r4, %r1;
-; CHECK-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
-; CHECK-NEXT:    ret;
+; CHECK-NOF32X2-LABEL: test_uitofp_2xi32_to_2xfloat(
+; CHECK-NOF32X2:       {
+; CHECK-NOF32X2-NEXT:    .reg .b32 %r<5>;
+; CHECK-NOF32X2-EMPTY:
+; CHECK-NOF32X2-NEXT:  // %bb.0:
+; CHECK-NOF32X2-NEXT:    ld.param.v2.b32 {%r1, %r2}, [test_uitofp_2xi32_to_2xfloat_param_0];
+; CHECK-NOF32X2-NEXT:    cvt.rn.f32.u32 %r3, %r2;
+; CHECK-NOF32X2-NEXT:    cvt.rn.f32.u32 %r4, %r1;
+; CHECK-NOF32X2-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-NOF32X2-NEXT:    ret;
+;
+; CHECK-F32X2-LABEL: test_uitofp_2xi32_to_2xfloat(
+; CHECK-F32X2:       {
+; CHECK-F32X2-NEXT:    .reg .b32 %r<5>;
+; CHECK-F32X2-NEXT:    .reg .b64 %rd<2>;
+; CHECK-F32X2-EMPTY:
+; CHECK-F32X2-NEXT:  // %bb.0:
+; CHECK-F32X2-NEXT:    ld.param.b64 %rd1, [test_uitofp_2xi32_to_2xfloat_param_0];
+; CHECK-F32X2-NEXT:    mov.b64 {%r1, %r2}, %rd1;
+; CHECK-F32X2-NEXT:    cvt.rn.f32.u32 %r3, %r2;
+; CHECK-F32X2-NEXT:    cvt.rn.f32.u32 %r4, %r1;
+; CHECK-F32X2-NEXT:    st.param.v2.b32 [func_retval0], {%r4, %r3};
+; CHECK-F32X2-NEXT:    ret;
   %r = uitofp <2 x i32> %a to <2 x float>
   ret <2 x float> %r
 }
diff --git a/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll b/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll
index d219493d2b31b..acfe644cf483b 100644
--- a/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll
+++ b/llvm/test/CodeGen/NVPTX/ldg-invariant-256.ll
@@ -346,15 +346,19 @@ define i32 @ld_global_v8i32(ptr addrspace(1) %ptr) {
 ; SM100-LABEL: ld_global_v8i32(
 ; SM100:       {
 ; SM100-NEXT:    .reg .b32 %r<16>;
-; SM100-NEXT:    .reg .b64 %rd<2>;
+; SM100-NEXT:    .reg .b64 %rd<6>;
 ; SM100-EMPTY:
 ; SM100-NEXT:  // %bb.0:
 ; SM100-NEXT:    ld.param.b64 %rd1, [ld_global_v8i32_param_0];
-; SM100-NEXT:    ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
-; SM100-NEXT:    add.s32 %r9, %r1, %r2;
-; SM100-NEXT:    add.s32 %r10, %r3, %r4;
-; SM100-NEXT:    add.s32 %r11, %r5, %r6;
-; SM100-NEXT:    add.s32 %r12, %r7, %r8;
+; SM100-NEXT:    ld.global.nc.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; SM100-NEXT:    mov.b64 {%r1, %r2}, %rd5;
+; SM100-NEXT:    mov.b64 {%r3, %r4}, %rd4;
+; SM100-NEXT:    mov.b64 {%r5, %r6}, %rd3;
+; SM100-NEXT:    mov.b64 {%r7, %r8}, %rd2;
+; SM100-NEXT:    add.s32 %r9, %r7, %r8;
+; SM100-NEXT:    add.s32 %r10, %r5, %r6;
+; SM100-NEXT:    add.s32 %r11, %r3, %r4;
+; SM100-NEXT:    add.s32 %r12, %r1, %r2;
 ; SM100-NEXT:    add.s32 %r13, %r9, %r10;
 ; SM100-NEXT:    add.s32 %r14, %r11, %r12;
 ; SM100-NEXT:    add.s32 %r15, %r13, %r14;
@@ -444,10 +448,9 @@ define i64 @ld_global_v4i64(ptr addrspace(1) %ptr) {
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
 ; SM90-NEXT:    ld.param.b64 %rd1, [ld_global_v4i64_param_0];
-; SM90-NEXT:    ld.global.nc.v2.b64 {%rd2, %rd3}, [%rd1+16];
-; SM90-NEXT:    ld.global.nc.v2.b64 {%rd4, %rd5}, [%rd1];
-; SM90-NEXT:    add.s64 %rd6, %rd4, %rd5;
-; SM90-NEXT:    add.s64 %rd7, %rd2, %rd3;
+; SM90-NEXT:    ld.global.nc.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; SM90-NEXT:    add.s64 %rd6, %rd2, %rd3;
+; SM90-NEXT:    add.s64 %rd7, %rd4, %rd5;
 ; SM90-NEXT:    add.s64 %rd8, %rd6, %rd7;
 ; SM90-NEXT:    st.param.b64 [func_retval0], %rd8;
 ; SM90-NEXT:    ret;
@@ -482,10 +485,9 @@ define double @ld_global_v4f64(ptr addrspace(1) %ptr) {
 ; SM90-EMPTY:
 ; SM90-NEXT:  // %bb.0:
 ; SM90-NEXT:    ld.param.b64 %rd1, [ld_global_v4f64_param_0];
-; SM90-NEXT:    ld.global.nc.v2.b64 {%rd2, %rd3}, [%rd1+16];
-; SM90-NEXT:    ld.global.nc.v2.b64 {%rd4, %rd5}, [%rd1];
-; SM90-NEXT:    add.rn.f64 %rd6, %rd4, %rd5;
-; SM90-NEXT:    add.rn.f64 %rd7, %rd2, %rd3;
+; SM90-NEXT:    ld.global.nc.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; SM90-NEXT:    add.rn.f64 %rd6, %rd2, %rd3;
+; SM90-NEXT:    add.rn.f64 %rd7, %rd4, %rd5;
 ; SM90-NEXT:    add.rn.f64 %rd8, %rd6, %rd7;
 ; SM90-NEXT:    st.param.b64 [func_retval0], %rd8;
 ; SM90-NEXT:    ret;
diff --git a/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll b/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll
index 12e3287e73f0f..57852451c0c72 100644
--- a/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll
+++ b/llvm/test/CodeGen/NVPTX/load-store-256-addressing-invariant.ll
@@ -82,11 +82,11 @@ define void @avar_bfloat() {
 define void @avar_i32() {
 ; PTX-LABEL: avar_i32(
 ; PTX:       {
-; PTX-NEXT:    .reg .b32 %r<9>;
+; PTX-NEXT:    .reg .b64 %rd<5>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0:
-; PTX-NEXT:    ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin];
-; PTX-NEXT:    st.global.v8.b32 [globalout], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; PTX-NEXT:    ld.global.nc.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin];
+; PTX-NEXT:    st.global.v4.b64 [globalout], {%rd1, %rd2, %rd3, %rd4};
 ; PTX-NEXT:    ret;
   %load = load <8 x i32>, ptr addrspace(1) @globalin, !invariant.load !0
   store <8 x i32> %load, ptr addrspace(1) @globalout
@@ -202,11 +202,11 @@ define void @asi_bfloat() {
 define void @asi_i32() {
 ; PTX-LABEL: asi_i32(
 ; PTX:       {
-; PTX-NEXT:    .reg .b32 %r<9>;
+; PTX-NEXT:    .reg .b64 %rd<5>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0:
-; PTX-NEXT:    ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin+32];
-; PTX-NEXT:    st.global.v8.b32 [globalout+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; PTX-NEXT:    ld.global.nc.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin+32];
+; PTX-NEXT:    st.global.v4.b64 [globalout+32], {%rd1, %rd2, %rd3, %rd4};
 ; PTX-NEXT:    ret;
   %in.offset = getelementptr inbounds i8, ptr addrspace(1) @globalin, i32 32
   %load = load <8 x i32>, ptr addrspace(1) %in.offset, !invariant.load !0
@@ -331,14 +331,13 @@ define void @areg_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 define void @areg_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; PTX-LABEL: areg_64_i32(
 ; PTX:       {
-; PTX-NEXT:    .reg .b32 %r<9>;
-; PTX-NEXT:    .reg .b64 %rd<3>;
+; PTX-NEXT:    .reg .b64 %rd<7>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0:
 ; PTX-NEXT:    ld.param.b64 %rd1, [areg_64_i32_param_0];
-; PTX-NEXT:    ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
-; PTX-NEXT:    ld.param.b64 %rd2, [areg_64_i32_param_1];
-; PTX-NEXT:    st.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; PTX-NEXT:    ld.global.nc.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; PTX-NEXT:    ld.param.b64 %rd6, [areg_64_i32_param_1];
+; PTX-NEXT:    st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; PTX-NEXT:    ret;
   %load = load <8 x i32>, ptr addrspace(1) %in, !invariant.load !0
   store <8 x i32> %load, ptr addrspace(1) %out
@@ -472,14 +471,13 @@ define void @ari_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 define void @ari_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; PTX-LABEL: ari_64_i32(
 ; PTX:       {
-; PTX-NEXT:    .reg .b32 %r<9>;
-; PTX-NEXT:    .reg .b64 %rd<3>;
+; PTX-NEXT:    .reg .b64 %rd<7>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0:
 ; PTX-NEXT:    ld.param.b64 %rd1, [ari_64_i32_param_0];
 ; PTX-NEXT:    ld.param.b64 %rd2, [ari_64_i32_param_1];
-; PTX-NEXT:    ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1+32];
-; PTX-NEXT:    st.global.v8.b32 [%rd2+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; PTX-NEXT:    ld.global.nc.v4.b64 {%rd3, %rd4, %rd5, %rd6}, [%rd1+32];
+; PTX-NEXT:    st.global.v4.b64 [%rd2+32], {%rd3, %rd4, %rd5, %rd6};
 ; PTX-NEXT:    ret;
   %in.offset = getelementptr inbounds i8, ptr addrspace(1) %in, i32 32
   %load = load <8 x i32>, ptr addrspace(1) %in.offset, !invariant.load !0
diff --git a/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll b/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll
index b7fa1dd5f2c4d..21604dfbf0013 100644
--- a/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll
+++ b/llvm/test/CodeGen/NVPTX/load-store-256-addressing.ll
@@ -78,11 +78,11 @@ define void @avar_bfloat() {
 define void @avar_i32() {
 ; PTX-LABEL: avar_i32(
 ; PTX:       {
-; PTX-NEXT:    .reg .b32 %r<9>;
+; PTX-NEXT:    .reg .b64 %rd<5>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0:
-; PTX-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin];
-; PTX-NEXT:    st.global.v8.b32 [globalout], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; PTX-NEXT:    ld.global.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin];
+; PTX-NEXT:    st.global.v4.b64 [globalout], {%rd1, %rd2, %rd3, %rd4};
 ; PTX-NEXT:    ret;
   %load = load <8 x i32>, ptr addrspace(1) @globalin
   store <8 x i32> %load, ptr addrspace(1) @globalout
@@ -198,11 +198,11 @@ define void @asi_bfloat() {
 define void @asi_i32() {
 ; PTX-LABEL: asi_i32(
 ; PTX:       {
-; PTX-NEXT:    .reg .b32 %r<9>;
+; PTX-NEXT:    .reg .b64 %rd<5>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0:
-; PTX-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [globalin+32];
-; PTX-NEXT:    st.global.v8.b32 [globalout+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; PTX-NEXT:    ld.global.v4.b64 {%rd1, %rd2, %rd3, %rd4}, [globalin+32];
+; PTX-NEXT:    st.global.v4.b64 [globalout+32], {%rd1, %rd2, %rd3, %rd4};
 ; PTX-NEXT:    ret;
   %in.offset = getelementptr inbounds i8, ptr addrspace(1) @globalin, i32 32
   %load = load <8 x i32>, ptr addrspace(1) %in.offset
@@ -327,14 +327,13 @@ define void @areg_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 define void @areg_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; PTX-LABEL: areg_64_i32(
 ; PTX:       {
-; PTX-NEXT:    .reg .b32 %r<9>;
-; PTX-NEXT:    .reg .b64 %rd<3>;
+; PTX-NEXT:    .reg .b64 %rd<7>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0:
 ; PTX-NEXT:    ld.param.b64 %rd1, [areg_64_i32_param_0];
-; PTX-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
-; PTX-NEXT:    ld.param.b64 %rd2, [areg_64_i32_param_1];
-; PTX-NEXT:    st.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; PTX-NEXT:    ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; PTX-NEXT:    ld.param.b64 %rd6, [areg_64_i32_param_1];
+; PTX-NEXT:    st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; PTX-NEXT:    ret;
   %load = load <8 x i32>, ptr addrspace(1) %in
   store <8 x i32> %load, ptr addrspace(1) %out
@@ -468,14 +467,13 @@ define void @ari_64_bfloat(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 define void @ari_64_i32(ptr addrspace(1) %in, ptr addrspace(1) %out) {
 ; PTX-LABEL: ari_64_i32(
 ; PTX:       {
-; PTX-NEXT:    .reg .b32 %r<9>;
-; PTX-NEXT:    .reg .b64 %rd<3>;
+; PTX-NEXT:    .reg .b64 %rd<7>;
 ; PTX-EMPTY:
 ; PTX-NEXT:  // %bb.0:
 ; PTX-NEXT:    ld.param.b64 %rd1, [ari_64_i32_param_0];
 ; PTX-NEXT:    ld.param.b64 %rd2, [ari_64_i32_param_1];
-; PTX-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1+32];
-; PTX-NEXT:    st.global.v8.b32 [%rd2+32], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; PTX-NEXT:    ld.global.v4.b64 {%rd3, %rd4, %rd5, %rd6}, [%rd1+32];
+; PTX-NEXT:    st.global.v4.b64 [%rd2+32], {%rd3, %rd4, %rd5, %rd6};
 ; PTX-NEXT:    ret;
   %in.offset = getelementptr inbounds i8, ptr addrspace(1) %in, i32 32
   %load = load <8 x i32>, ptr addrspace(1) %in.offset
diff --git a/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll b/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll
index e8b43ad28ad27..3e8322511d5c6 100644
--- a/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll
+++ b/llvm/test/CodeGen/NVPTX/load-store-vectors-256.ll
@@ -100,19 +100,32 @@ define void @generic_16xbfloat(ptr %a, ptr %b) {
 }
 
 define void @generic_8xi32(ptr %a, ptr %b) {
-; CHECK-LABEL: generic_8xi32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<9>;
-; CHECK-NEXT:    .reg .b64 %rd<3>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b64 %rd1, [generic_8xi32_param_0];
-; CHECK-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; CHECK-NEXT:    ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
-; CHECK-NEXT:    ld.param.b64 %rd2, [generic_8xi32_param_1];
-; CHECK-NEXT:    st.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
-; CHECK-NEXT:    st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
-; CHECK-NEXT:    ret;
+; SM90-LABEL: generic_8xi32(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [generic_8xi32_param_0];
+; SM90-NEXT:    ld.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; SM90-NEXT:    ld.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
+; SM90-NEXT:    ld.param.b64 %rd2, [generic_8xi32_param_1];
+; SM90-NEXT:    st.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
+; SM90-NEXT:    st.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: generic_8xi32(
+; SM100:       {
+; SM100-NEXT:    .reg .b64 %rd<7>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [generic_8xi32_param_0];
+; SM100-NEXT:    ld.v2.b64 {%rd2, %rd3}, [%rd1];
+; SM100-NEXT:    ld.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; SM100-NEXT:    ld.param.b64 %rd6, [generic_8xi32_param_1];
+; SM100-NEXT:    st.v2.b64 [%rd6+16], {%rd4, %rd5};
+; SM100-NEXT:    st.v2.b64 [%rd6], {%rd2, %rd3};
+; SM100-NEXT:    ret;
   %a.load = load <8 x i32>, ptr %a
   store <8 x i32> %a.load, ptr %b
   ret void
@@ -125,11 +138,9 @@ define void @generic_4xi64(ptr %a, ptr %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [generic_4xi64_param_0];
-; CHECK-NEXT:    ld.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [generic_4xi64_param_1];
-; CHECK-NEXT:    st.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load <4 x i64>, ptr %a
   store <4 x i64> %a.load, ptr %b
@@ -175,11 +186,9 @@ define void @generic_4xdouble(ptr %a, ptr %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [generic_4xdouble_param_0];
-; CHECK-NEXT:    ld.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [generic_4xdouble_param_1];
-; CHECK-NEXT:    st.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load <4 x double>, ptr %a
   store <4 x double> %a.load, ptr %b
@@ -265,19 +274,32 @@ define void @generic_volatile_16xbfloat(ptr %a, ptr %b) {
 }
 
 define void @generic_volatile_8xi32(ptr %a, ptr %b) {
-; CHECK-LABEL: generic_volatile_8xi32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<9>;
-; CHECK-NEXT:    .reg .b64 %rd<3>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b64 %rd1, [generic_volatile_8xi32_param_0];
-; CHECK-NEXT:    ld.volatile.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; CHECK-NEXT:    ld.volatile.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
-; CHECK-NEXT:    ld.param.b64 %rd2, [generic_volatile_8xi32_param_1];
-; CHECK-NEXT:    st.volatile.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
-; CHECK-NEXT:    st.volatile.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
-; CHECK-NEXT:    ret;
+; SM90-LABEL: generic_volatile_8xi32(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [generic_volatile_8xi32_param_0];
+; SM90-NEXT:    ld.volatile.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; SM90-NEXT:    ld.volatile.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
+; SM90-NEXT:    ld.param.b64 %rd2, [generic_volatile_8xi32_param_1];
+; SM90-NEXT:    st.volatile.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
+; SM90-NEXT:    st.volatile.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: generic_volatile_8xi32(
+; SM100:       {
+; SM100-NEXT:    .reg .b64 %rd<7>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [generic_volatile_8xi32_param_0];
+; SM100-NEXT:    ld.volatile.v2.b64 {%rd2, %rd3}, [%rd1];
+; SM100-NEXT:    ld.volatile.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; SM100-NEXT:    ld.param.b64 %rd6, [generic_volatile_8xi32_param_1];
+; SM100-NEXT:    st.volatile.v2.b64 [%rd6+16], {%rd4, %rd5};
+; SM100-NEXT:    st.volatile.v2.b64 [%rd6], {%rd2, %rd3};
+; SM100-NEXT:    ret;
   %a.load = load volatile <8 x i32>, ptr %a
   store volatile <8 x i32> %a.load, ptr %b
   ret void
@@ -290,11 +312,9 @@ define void @generic_volatile_4xi64(ptr %a, ptr %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [generic_volatile_4xi64_param_0];
-; CHECK-NEXT:    ld.volatile.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.volatile.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.volatile.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [generic_volatile_4xi64_param_1];
-; CHECK-NEXT:    st.volatile.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.volatile.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.volatile.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load volatile <4 x i64>, ptr %a
   store volatile <4 x i64> %a.load, ptr %b
@@ -340,11 +360,9 @@ define void @generic_volatile_4xdouble(ptr %a, ptr %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [generic_volatile_4xdouble_param_0];
-; CHECK-NEXT:    ld.volatile.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.volatile.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.volatile.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [generic_volatile_4xdouble_param_1];
-; CHECK-NEXT:    st.volatile.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.volatile.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.volatile.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load volatile <4 x double>, ptr %a
   store volatile <4 x double> %a.load, ptr %b
@@ -496,14 +514,13 @@ define void @global_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) {
 ;
 ; SM100-LABEL: global_8xi32(
 ; SM100:       {
-; SM100-NEXT:    .reg .b32 %r<9>;
-; SM100-NEXT:    .reg .b64 %rd<3>;
+; SM100-NEXT:    .reg .b64 %rd<7>;
 ; SM100-EMPTY:
 ; SM100-NEXT:  // %bb.0:
 ; SM100-NEXT:    ld.param.b64 %rd1, [global_8xi32_param_0];
-; SM100-NEXT:    ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
-; SM100-NEXT:    ld.param.b64 %rd2, [global_8xi32_param_1];
-; SM100-NEXT:    st.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; SM100-NEXT:    ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd6, [global_8xi32_param_1];
+; SM100-NEXT:    st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; SM100-NEXT:    ret;
   %a.load = load <8 x i32>, ptr addrspace(1) %a
   store <8 x i32> %a.load, ptr addrspace(1) %b
@@ -511,29 +528,16 @@ define void @global_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) {
 }
 
 define void @global_4xi64(ptr addrspace(1) %a, ptr addrspace(1) %b) {
-; SM90-LABEL: global_4xi64(
-; SM90:       {
-; SM90-NEXT:    .reg .b64 %rd<7>;
-; SM90-EMPTY:
-; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b64 %rd1, [global_4xi64_param_0];
-; SM90-NEXT:    ld.global.v2.b64 {%rd2, %rd3}, [%rd1];
-; SM90-NEXT:    ld.global.v2.b64 {%rd4, %rd5}, [%rd1+16];
-; SM90-NEXT:    ld.param.b64 %rd6, [global_4xi64_param_1];
-; SM90-NEXT:    st.global.v2.b64 [%rd6+16], {%rd4, %rd5};
-; SM90-NEXT:    st.global.v2.b64 [%rd6], {%rd2, %rd3};
-; SM90-NEXT:    ret;
-;
-; SM100-LABEL: global_4xi64(
-; SM100:       {
-; SM100-NEXT:    .reg .b64 %rd<7>;
-; SM100-EMPTY:
-; SM100-NEXT:  // %bb.0:
-; SM100-NEXT:    ld.param.b64 %rd1, [global_4xi64_param_0];
-; SM100-NEXT:    ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
-; SM100-NEXT:    ld.param.b64 %rd6, [global_4xi64_param_1];
-; SM100-NEXT:    st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
-; SM100-NEXT:    ret;
+; CHECK-LABEL: global_4xi64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_4xi64_param_0];
+; CHECK-NEXT:    ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd6, [global_4xi64_param_1];
+; CHECK-NEXT:    st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
+; CHECK-NEXT:    ret;
   %a.load = load <4 x i64>, ptr addrspace(1) %a
   store <4 x i64> %a.load, ptr addrspace(1) %b
   ret void
@@ -570,29 +574,16 @@ define void @global_8xfloat(ptr addrspace(1) %a, ptr addrspace(1) %b) {
 }
 
 define void @global_4xdouble(ptr addrspace(1) %a, ptr addrspace(1) %b) {
-; SM90-LABEL: global_4xdouble(
-; SM90:       {
-; SM90-NEXT:    .reg .b64 %rd<7>;
-; SM90-EMPTY:
-; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b64 %rd1, [global_4xdouble_param_0];
-; SM90-NEXT:    ld.global.v2.b64 {%rd2, %rd3}, [%rd1];
-; SM90-NEXT:    ld.global.v2.b64 {%rd4, %rd5}, [%rd1+16];
-; SM90-NEXT:    ld.param.b64 %rd6, [global_4xdouble_param_1];
-; SM90-NEXT:    st.global.v2.b64 [%rd6+16], {%rd4, %rd5};
-; SM90-NEXT:    st.global.v2.b64 [%rd6], {%rd2, %rd3};
-; SM90-NEXT:    ret;
-;
-; SM100-LABEL: global_4xdouble(
-; SM100:       {
-; SM100-NEXT:    .reg .b64 %rd<7>;
-; SM100-EMPTY:
-; SM100-NEXT:  // %bb.0:
-; SM100-NEXT:    ld.param.b64 %rd1, [global_4xdouble_param_0];
-; SM100-NEXT:    ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
-; SM100-NEXT:    ld.param.b64 %rd6, [global_4xdouble_param_1];
-; SM100-NEXT:    st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
-; SM100-NEXT:    ret;
+; CHECK-LABEL: global_4xdouble(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_4xdouble_param_0];
+; CHECK-NEXT:    ld.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd6, [global_4xdouble_param_1];
+; CHECK-NEXT:    st.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
+; CHECK-NEXT:    ret;
   %a.load = load <4 x double>, ptr addrspace(1) %a
   store <4 x double> %a.load, ptr addrspace(1) %b
   ret void
@@ -741,14 +732,13 @@ define void @global_volatile_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) {
 ;
 ; SM100-LABEL: global_volatile_8xi32(
 ; SM100:       {
-; SM100-NEXT:    .reg .b32 %r<9>;
-; SM100-NEXT:    .reg .b64 %rd<3>;
+; SM100-NEXT:    .reg .b64 %rd<7>;
 ; SM100-EMPTY:
 ; SM100-NEXT:  // %bb.0:
 ; SM100-NEXT:    ld.param.b64 %rd1, [global_volatile_8xi32_param_0];
-; SM100-NEXT:    ld.volatile.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
-; SM100-NEXT:    ld.param.b64 %rd2, [global_volatile_8xi32_param_1];
-; SM100-NEXT:    st.volatile.global.v8.b32 [%rd2], {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8};
+; SM100-NEXT:    ld.volatile.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; SM100-NEXT:    ld.param.b64 %rd6, [global_volatile_8xi32_param_1];
+; SM100-NEXT:    st.volatile.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; SM100-NEXT:    ret;
   %a.load = load volatile <8 x i32>, ptr addrspace(1) %a
   store volatile <8 x i32> %a.load, ptr addrspace(1) %b
@@ -756,29 +746,16 @@ define void @global_volatile_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) {
 }
 
 define void @global_volatile_4xi64(ptr addrspace(1) %a, ptr addrspace(1) %b) {
-; SM90-LABEL: global_volatile_4xi64(
-; SM90:       {
-; SM90-NEXT:    .reg .b64 %rd<7>;
-; SM90-EMPTY:
-; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b64 %rd1, [global_volatile_4xi64_param_0];
-; SM90-NEXT:    ld.volatile.global.v2.b64 {%rd2, %rd3}, [%rd1];
-; SM90-NEXT:    ld.volatile.global.v2.b64 {%rd4, %rd5}, [%rd1+16];
-; SM90-NEXT:    ld.param.b64 %rd6, [global_volatile_4xi64_param_1];
-; SM90-NEXT:    st.volatile.global.v2.b64 [%rd6+16], {%rd4, %rd5};
-; SM90-NEXT:    st.volatile.global.v2.b64 [%rd6], {%rd2, %rd3};
-; SM90-NEXT:    ret;
-;
-; SM100-LABEL: global_volatile_4xi64(
-; SM100:       {
-; SM100-NEXT:    .reg .b64 %rd<7>;
-; SM100-EMPTY:
-; SM100-NEXT:  // %bb.0:
-; SM100-NEXT:    ld.param.b64 %rd1, [global_volatile_4xi64_param_0];
-; SM100-NEXT:    ld.volatile.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
-; SM100-NEXT:    ld.param.b64 %rd6, [global_volatile_4xi64_param_1];
-; SM100-NEXT:    st.volatile.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
-; SM100-NEXT:    ret;
+; CHECK-LABEL: global_volatile_4xi64(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_volatile_4xi64_param_0];
+; CHECK-NEXT:    ld.volatile.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd6, [global_volatile_4xi64_param_1];
+; CHECK-NEXT:    st.volatile.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
+; CHECK-NEXT:    ret;
   %a.load = load volatile <4 x i64>, ptr addrspace(1) %a
   store volatile <4 x i64> %a.load, ptr addrspace(1) %b
   ret void
@@ -815,29 +792,16 @@ define void @global_volatile_8xfloat(ptr addrspace(1) %a, ptr addrspace(1) %b) {
 }
 
 define void @global_volatile_4xdouble(ptr addrspace(1) %a, ptr addrspace(1) %b) {
-; SM90-LABEL: global_volatile_4xdouble(
-; SM90:       {
-; SM90-NEXT:    .reg .b64 %rd<7>;
-; SM90-EMPTY:
-; SM90-NEXT:  // %bb.0:
-; SM90-NEXT:    ld.param.b64 %rd1, [global_volatile_4xdouble_param_0];
-; SM90-NEXT:    ld.volatile.global.v2.b64 {%rd2, %rd3}, [%rd1];
-; SM90-NEXT:    ld.volatile.global.v2.b64 {%rd4, %rd5}, [%rd1+16];
-; SM90-NEXT:    ld.param.b64 %rd6, [global_volatile_4xdouble_param_1];
-; SM90-NEXT:    st.volatile.global.v2.b64 [%rd6+16], {%rd4, %rd5};
-; SM90-NEXT:    st.volatile.global.v2.b64 [%rd6], {%rd2, %rd3};
-; SM90-NEXT:    ret;
-;
-; SM100-LABEL: global_volatile_4xdouble(
-; SM100:       {
-; SM100-NEXT:    .reg .b64 %rd<7>;
-; SM100-EMPTY:
-; SM100-NEXT:  // %bb.0:
-; SM100-NEXT:    ld.param.b64 %rd1, [global_volatile_4xdouble_param_0];
-; SM100-NEXT:    ld.volatile.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
-; SM100-NEXT:    ld.param.b64 %rd6, [global_volatile_4xdouble_param_1];
-; SM100-NEXT:    st.volatile.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
-; SM100-NEXT:    ret;
+; CHECK-LABEL: global_volatile_4xdouble(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.b64 %rd1, [global_volatile_4xdouble_param_0];
+; CHECK-NEXT:    ld.volatile.global.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
+; CHECK-NEXT:    ld.param.b64 %rd6, [global_volatile_4xdouble_param_1];
+; CHECK-NEXT:    st.volatile.global.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
+; CHECK-NEXT:    ret;
   %a.load = load volatile <4 x double>, ptr addrspace(1) %a
   store volatile <4 x double> %a.load, ptr addrspace(1) %b
   ret void
@@ -924,19 +888,32 @@ define void @shared_16xbfloat(ptr addrspace(3) %a, ptr addrspace(3) %b) {
 }
 
 define void @shared_8xi32(ptr addrspace(3) %a, ptr addrspace(3) %b) {
-; CHECK-LABEL: shared_8xi32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<9>;
-; CHECK-NEXT:    .reg .b64 %rd<3>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b64 %rd1, [shared_8xi32_param_0];
-; CHECK-NEXT:    ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; CHECK-NEXT:    ld.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
-; CHECK-NEXT:    ld.param.b64 %rd2, [shared_8xi32_param_1];
-; CHECK-NEXT:    st.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
-; CHECK-NEXT:    st.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
-; CHECK-NEXT:    ret;
+; SM90-LABEL: shared_8xi32(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [shared_8xi32_param_0];
+; SM90-NEXT:    ld.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; SM90-NEXT:    ld.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
+; SM90-NEXT:    ld.param.b64 %rd2, [shared_8xi32_param_1];
+; SM90-NEXT:    st.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
+; SM90-NEXT:    st.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: shared_8xi32(
+; SM100:       {
+; SM100-NEXT:    .reg .b64 %rd<7>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [shared_8xi32_param_0];
+; SM100-NEXT:    ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; SM100-NEXT:    ld.shared.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; SM100-NEXT:    ld.param.b64 %rd6, [shared_8xi32_param_1];
+; SM100-NEXT:    st.shared.v2.b64 [%rd6+16], {%rd4, %rd5};
+; SM100-NEXT:    st.shared.v2.b64 [%rd6], {%rd2, %rd3};
+; SM100-NEXT:    ret;
   %a.load = load <8 x i32>, ptr addrspace(3) %a
   store <8 x i32> %a.load, ptr addrspace(3) %b
   ret void
@@ -949,11 +926,9 @@ define void @shared_4xi64(ptr addrspace(3) %a, ptr addrspace(3) %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [shared_4xi64_param_0];
-; CHECK-NEXT:    ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.shared.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.shared.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [shared_4xi64_param_1];
-; CHECK-NEXT:    st.shared.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.shared.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.shared.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load <4 x i64>, ptr addrspace(3) %a
   store <4 x i64> %a.load, ptr addrspace(3) %b
@@ -999,11 +974,9 @@ define void @shared_4xdouble(ptr addrspace(3) %a, ptr addrspace(3) %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [shared_4xdouble_param_0];
-; CHECK-NEXT:    ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.shared.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.shared.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [shared_4xdouble_param_1];
-; CHECK-NEXT:    st.shared.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.shared.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.shared.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load <4 x double>, ptr addrspace(3) %a
   store <4 x double> %a.load, ptr addrspace(3) %b
@@ -1089,19 +1062,32 @@ define void @shared_volatile_16xbfloat(ptr addrspace(3) %a, ptr addrspace(3) %b)
 }
 
 define void @shared_volatile_8xi32(ptr addrspace(3) %a, ptr addrspace(3) %b) {
-; CHECK-LABEL: shared_volatile_8xi32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<9>;
-; CHECK-NEXT:    .reg .b64 %rd<3>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b64 %rd1, [shared_volatile_8xi32_param_0];
-; CHECK-NEXT:    ld.volatile.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; CHECK-NEXT:    ld.volatile.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
-; CHECK-NEXT:    ld.param.b64 %rd2, [shared_volatile_8xi32_param_1];
-; CHECK-NEXT:    st.volatile.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
-; CHECK-NEXT:    st.volatile.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
-; CHECK-NEXT:    ret;
+; SM90-LABEL: shared_volatile_8xi32(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [shared_volatile_8xi32_param_0];
+; SM90-NEXT:    ld.volatile.shared.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; SM90-NEXT:    ld.volatile.shared.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
+; SM90-NEXT:    ld.param.b64 %rd2, [shared_volatile_8xi32_param_1];
+; SM90-NEXT:    st.volatile.shared.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
+; SM90-NEXT:    st.volatile.shared.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: shared_volatile_8xi32(
+; SM100:       {
+; SM100-NEXT:    .reg .b64 %rd<7>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [shared_volatile_8xi32_param_0];
+; SM100-NEXT:    ld.volatile.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; SM100-NEXT:    ld.volatile.shared.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; SM100-NEXT:    ld.param.b64 %rd6, [shared_volatile_8xi32_param_1];
+; SM100-NEXT:    st.volatile.shared.v2.b64 [%rd6+16], {%rd4, %rd5};
+; SM100-NEXT:    st.volatile.shared.v2.b64 [%rd6], {%rd2, %rd3};
+; SM100-NEXT:    ret;
   %a.load = load volatile <8 x i32>, ptr addrspace(3) %a
   store volatile <8 x i32> %a.load, ptr addrspace(3) %b
   ret void
@@ -1114,11 +1100,9 @@ define void @shared_volatile_4xi64(ptr addrspace(3) %a, ptr addrspace(3) %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [shared_volatile_4xi64_param_0];
-; CHECK-NEXT:    ld.volatile.shared.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.volatile.shared.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.volatile.shared.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [shared_volatile_4xi64_param_1];
-; CHECK-NEXT:    st.volatile.shared.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.volatile.shared.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.volatile.shared.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load volatile <4 x i64>, ptr addrspace(3) %a
   store volatile <4 x i64> %a.load, ptr addrspace(3) %b
@@ -1164,11 +1148,9 @@ define void @shared_volatile_4xdouble(ptr addrspace(3) %a, ptr addrspace(3) %b)
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [shared_volatile_4xdouble_param_0];
-; CHECK-NEXT:    ld.volatile.shared.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.volatile.shared.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.volatile.shared.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [shared_volatile_4xdouble_param_1];
-; CHECK-NEXT:    st.volatile.shared.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.volatile.shared.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.volatile.shared.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load volatile <4 x double>, ptr addrspace(3) %a
   store volatile <4 x double> %a.load, ptr addrspace(3) %b
@@ -1256,19 +1238,32 @@ define void @local_16xbfloat(ptr addrspace(5) %a, ptr addrspace(5) %b) {
 }
 
 define void @local_8xi32(ptr addrspace(5) %a, ptr addrspace(5) %b) {
-; CHECK-LABEL: local_8xi32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<9>;
-; CHECK-NEXT:    .reg .b64 %rd<3>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b64 %rd1, [local_8xi32_param_0];
-; CHECK-NEXT:    ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; CHECK-NEXT:    ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
-; CHECK-NEXT:    ld.param.b64 %rd2, [local_8xi32_param_1];
-; CHECK-NEXT:    st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
-; CHECK-NEXT:    st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
-; CHECK-NEXT:    ret;
+; SM90-LABEL: local_8xi32(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [local_8xi32_param_0];
+; SM90-NEXT:    ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; SM90-NEXT:    ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
+; SM90-NEXT:    ld.param.b64 %rd2, [local_8xi32_param_1];
+; SM90-NEXT:    st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
+; SM90-NEXT:    st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: local_8xi32(
+; SM100:       {
+; SM100-NEXT:    .reg .b64 %rd<7>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [local_8xi32_param_0];
+; SM100-NEXT:    ld.local.v2.b64 {%rd2, %rd3}, [%rd1];
+; SM100-NEXT:    ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; SM100-NEXT:    ld.param.b64 %rd6, [local_8xi32_param_1];
+; SM100-NEXT:    st.local.v2.b64 [%rd6+16], {%rd4, %rd5};
+; SM100-NEXT:    st.local.v2.b64 [%rd6], {%rd2, %rd3};
+; SM100-NEXT:    ret;
   %a.load = load <8 x i32>, ptr addrspace(5) %a
   store <8 x i32> %a.load, ptr addrspace(5) %b
   ret void
@@ -1281,11 +1276,9 @@ define void @local_4xi64(ptr addrspace(5) %a, ptr addrspace(5) %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [local_4xi64_param_0];
-; CHECK-NEXT:    ld.local.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.local.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [local_4xi64_param_1];
-; CHECK-NEXT:    st.local.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.local.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.local.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load <4 x i64>, ptr addrspace(5) %a
   store <4 x i64> %a.load, ptr addrspace(5) %b
@@ -1331,11 +1324,9 @@ define void @local_4xdouble(ptr addrspace(5) %a, ptr addrspace(5) %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [local_4xdouble_param_0];
-; CHECK-NEXT:    ld.local.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.local.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [local_4xdouble_param_1];
-; CHECK-NEXT:    st.local.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.local.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.local.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load <4 x double>, ptr addrspace(5) %a
   store <4 x double> %a.load, ptr addrspace(5) %b
@@ -1421,19 +1412,32 @@ define void @local_volatile_16xbfloat(ptr addrspace(5) %a, ptr addrspace(5) %b)
 }
 
 define void @local_volatile_8xi32(ptr addrspace(5) %a, ptr addrspace(5) %b) {
-; CHECK-LABEL: local_volatile_8xi32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<9>;
-; CHECK-NEXT:    .reg .b64 %rd<3>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.b64 %rd1, [local_volatile_8xi32_param_0];
-; CHECK-NEXT:    ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
-; CHECK-NEXT:    ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
-; CHECK-NEXT:    ld.param.b64 %rd2, [local_volatile_8xi32_param_1];
-; CHECK-NEXT:    st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
-; CHECK-NEXT:    st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
-; CHECK-NEXT:    ret;
+; SM90-LABEL: local_volatile_8xi32(
+; SM90:       {
+; SM90-NEXT:    .reg .b32 %r<9>;
+; SM90-NEXT:    .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT:  // %bb.0:
+; SM90-NEXT:    ld.param.b64 %rd1, [local_volatile_8xi32_param_0];
+; SM90-NEXT:    ld.local.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1];
+; SM90-NEXT:    ld.local.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1+16];
+; SM90-NEXT:    ld.param.b64 %rd2, [local_volatile_8xi32_param_1];
+; SM90-NEXT:    st.local.v4.b32 [%rd2+16], {%r5, %r6, %r7, %r8};
+; SM90-NEXT:    st.local.v4.b32 [%rd2], {%r1, %r2, %r3, %r4};
+; SM90-NEXT:    ret;
+;
+; SM100-LABEL: local_volatile_8xi32(
+; SM100:       {
+; SM100-NEXT:    .reg .b64 %rd<7>;
+; SM100-EMPTY:
+; SM100-NEXT:  // %bb.0:
+; SM100-NEXT:    ld.param.b64 %rd1, [local_volatile_8xi32_param_0];
+; SM100-NEXT:    ld.local.v2.b64 {%rd2, %rd3}, [%rd1];
+; SM100-NEXT:    ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; SM100-NEXT:    ld.param.b64 %rd6, [local_volatile_8xi32_param_1];
+; SM100-NEXT:    st.local.v2.b64 [%rd6+16], {%rd4, %rd5};
+; SM100-NEXT:    st.local.v2.b64 [%rd6], {%rd2, %rd3};
+; SM100-NEXT:    ret;
   %a.load = load volatile <8 x i32>, ptr addrspace(5) %a
   store volatile <8 x i32> %a.load, ptr addrspace(5) %b
   ret void
@@ -1446,11 +1450,9 @@ define void @local_volatile_4xi64(ptr addrspace(5) %a, ptr addrspace(5) %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [local_volatile_4xi64_param_0];
-; CHECK-NEXT:    ld.local.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.local.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [local_volatile_4xi64_param_1];
-; CHECK-NEXT:    st.local.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.local.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.local.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load volatile <4 x i64>, ptr addrspace(5) %a
   store volatile <4 x i64> %a.load, ptr addrspace(5) %b
@@ -1496,11 +1498,9 @@ define void @local_volatile_4xdouble(ptr addrspace(5) %a, ptr addrspace(5) %b) {
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b64 %rd1, [local_volatile_4xdouble_param_0];
-; CHECK-NEXT:    ld.local.v2.b64 {%rd2, %rd3}, [%rd1];
-; CHECK-NEXT:    ld.local.v2.b64 {%rd4, %rd5}, [%rd1+16];
+; CHECK-NEXT:    ld.local.v4.b64 {%rd2, %rd3, %rd4, %rd5}, [%rd1];
 ; CHECK-NEXT:    ld.param.b64 %rd6, [local_volatile_4xdouble_param_1];
-; CHECK-NEXT:    st.local.v2.b64 [%rd6+16], {%rd4, %rd5};
-; CHECK-NEXT:    st.local.v2.b64 [%rd6], {%rd2, %rd3};
+; CHECK-NEXT:    st.local.v4.b64 [%rd6], {%rd2, %rd3, %rd4, %rd5};
 ; CHECK-NEXT:    ret;
   %a.load = load volatile <4 x double>, ptr addrspace(5) %a
   store volatile <4 x double> %a.load, ptr addrspace(5) %b
diff --git a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
index 7553c727b09c5..fb3bafbd0db96 100644
--- a/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
+++ b/llvm/test/CodeGen/NVPTX/load-with-non-coherent-cache.ll
@@ -536,10 +536,8 @@ define ptx_kernel void @foo17(ptr noalias readonly %from, ptr %to) {
 ; SM20-NEXT:    cvta.to.global.u64 %rd2, %rd1;
 ; SM20-NEXT:    ld.param.b64 %rd3, [foo17_param_1];
 ; SM20-NEXT:    cvta.to.global.u64 %rd4, %rd3;
-; SM20-NEXT:    ld.global.v2.b64 {%rd5, %rd6}, [%rd2];
-; SM20-NEXT:    ld.global.v2.b64 {%rd7, %rd8}, [%rd2+16];
-; SM20-NEXT:    st.global.v2.b64 [%rd4+16], {%rd7, %rd8};
-; SM20-NEXT:    st.global.v2.b64 [%rd4], {%rd5, %rd6};
+; SM20-NEXT:    ld.global.v4.b64 {%rd5, %rd6, %rd7, %rd8}, [%rd2];
+; SM20-NEXT:    st.global.v4.b64 [%rd4], {%rd5, %rd6, %rd7, %rd8};
 ; SM20-NEXT:    ret;
 ;
 ; SM35-LABEL: foo17(
@@ -551,10 +549,8 @@ define ptx_kernel void @foo17(ptr noalias readonly %from, ptr %to) {
 ; SM35-NEXT:    cvta.to.global.u64 %rd2, %rd1;
 ; SM35-NEXT:    ld.param.b64 %rd3, [foo17_param_1];
 ; SM35-NEXT:    cvta.to.global.u64 %rd4, %rd3;
-; SM35-NEXT:    ld.global.nc.v2.b64 {%rd5, %rd6}, [%rd2];
-; SM35-NEXT:    ld.global.nc.v2.b64 {%rd7, %rd8}, [%rd2+16];
-; SM35-NEXT:    st.global.v2.b64 [%rd4+16], {%rd7, %rd8};
-; SM35-NEXT:    st.global.v2.b64 [%rd4], {%rd5, %rd6};
+; SM35-NEXT:    ld.global.nc.v4.b64 {%rd5, %rd6, %rd7, %rd8}, [%rd2];
+; SM35-NEXT:    st.global.v4.b64 [%rd4], {%rd5, %rd6, %rd7, %rd8};
 ; SM35-NEXT:    ret;
   %1 = load <4 x double>, ptr %from
   store <4 x double> %1, ptr %to
diff --git a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
index f871e4039a558..87787ba2bf81c 100644
--- a/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
+++ b/llvm/test/CodeGen/NVPTX/reduction-intrinsics.ll
@@ -1452,22 +1452,44 @@ define i16 @reduce_add_i16_nonpow2(<7 x i16> %in) {
 }
 
 define i32 @reduce_add_i32(<8 x i32> %in) {
-; CHECK-LABEL: reduce_add_i32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0];
-; CHECK-NEXT:    add.s32 %r9, %r4, %r8;
-; CHECK-NEXT:    add.s32 %r10, %r2, %r6;
-; CHECK-NEXT:    add.s32 %r11, %r10, %r9;
-; CHECK-NEXT:    add.s32 %r12, %r3, %r7;
-; CHECK-NEXT:    add.s32 %r13, %r1, %r5;
-; CHECK-NEXT:    add.s32 %r14, %r13, %r12;
-; CHECK-NEXT:    add.s32 %r15, %r14, %r11;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_add_i32(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_add_i32_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_add_i32_param_0];
+; CHECK-SM80-NEXT:    add.s32 %r9, %r4, %r8;
+; CHECK-SM80-NEXT:    add.s32 %r10, %r2, %r6;
+; CHECK-SM80-NEXT:    add.s32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT:    add.s32 %r12, %r3, %r7;
+; CHECK-SM80-NEXT:    add.s32 %r13, %r1, %r5;
+; CHECK-SM80-NEXT:    add.s32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT:    add.s32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_add_i32(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [reduce_add_i32_param_0+16];
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [reduce_add_i32_param_0];
+; CHECK-SM100-NEXT:    mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM100-NEXT:    add.s32 %r5, %r4, %r2;
+; CHECK-SM100-NEXT:    mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM100-NEXT:    mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM100-NEXT:    add.s32 %r10, %r9, %r7;
+; CHECK-SM100-NEXT:    add.s32 %r11, %r10, %r5;
+; CHECK-SM100-NEXT:    add.s32 %r12, %r3, %r1;
+; CHECK-SM100-NEXT:    add.s32 %r13, %r8, %r6;
+; CHECK-SM100-NEXT:    add.s32 %r14, %r13, %r12;
+; CHECK-SM100-NEXT:    add.s32 %r15, %r14, %r11;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM100-NEXT:    ret;
   %res = call i32 @llvm.vector.reduce.add(<8 x i32> %in)
   ret i32 %res
 }
@@ -1543,22 +1565,44 @@ define i16 @reduce_mul_i16_nonpow2(<7 x i16> %in) {
 }
 
 define i32 @reduce_mul_i32(<8 x i32> %in) {
-; CHECK-LABEL: reduce_mul_i32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i32_param_0];
-; CHECK-NEXT:    mul.lo.s32 %r9, %r4, %r8;
-; CHECK-NEXT:    mul.lo.s32 %r10, %r2, %r6;
-; CHECK-NEXT:    mul.lo.s32 %r11, %r10, %r9;
-; CHECK-NEXT:    mul.lo.s32 %r12, %r3, %r7;
-; CHECK-NEXT:    mul.lo.s32 %r13, %r1, %r5;
-; CHECK-NEXT:    mul.lo.s32 %r14, %r13, %r12;
-; CHECK-NEXT:    mul.lo.s32 %r15, %r14, %r11;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_mul_i32(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_mul_i32_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_mul_i32_param_0];
+; CHECK-SM80-NEXT:    mul.lo.s32 %r9, %r4, %r8;
+; CHECK-SM80-NEXT:    mul.lo.s32 %r10, %r2, %r6;
+; CHECK-SM80-NEXT:    mul.lo.s32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT:    mul.lo.s32 %r12, %r3, %r7;
+; CHECK-SM80-NEXT:    mul.lo.s32 %r13, %r1, %r5;
+; CHECK-SM80-NEXT:    mul.lo.s32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT:    mul.lo.s32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_mul_i32(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [reduce_mul_i32_param_0+16];
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [reduce_mul_i32_param_0];
+; CHECK-SM100-NEXT:    mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM100-NEXT:    mul.lo.s32 %r5, %r4, %r2;
+; CHECK-SM100-NEXT:    mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM100-NEXT:    mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM100-NEXT:    mul.lo.s32 %r10, %r9, %r7;
+; CHECK-SM100-NEXT:    mul.lo.s32 %r11, %r10, %r5;
+; CHECK-SM100-NEXT:    mul.lo.s32 %r12, %r3, %r1;
+; CHECK-SM100-NEXT:    mul.lo.s32 %r13, %r8, %r6;
+; CHECK-SM100-NEXT:    mul.lo.s32 %r14, %r13, %r12;
+; CHECK-SM100-NEXT:    mul.lo.s32 %r15, %r14, %r11;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM100-NEXT:    ret;
   %res = call i32 @llvm.vector.reduce.mul(<8 x i32> %in)
   ret i32 %res
 }
@@ -1673,22 +1717,44 @@ define i16 @reduce_umax_i16_nonpow2(<7 x i16> %in) {
 }
 
 define i32 @reduce_umax_i32(<8 x i32> %in) {
-; CHECK-LABEL: reduce_umax_i32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0];
-; CHECK-NEXT:    max.u32 %r9, %r4, %r8;
-; CHECK-NEXT:    max.u32 %r10, %r2, %r6;
-; CHECK-NEXT:    max.u32 %r11, %r10, %r9;
-; CHECK-NEXT:    max.u32 %r12, %r3, %r7;
-; CHECK-NEXT:    max.u32 %r13, %r1, %r5;
-; CHECK-NEXT:    max.u32 %r14, %r13, %r12;
-; CHECK-NEXT:    max.u32 %r15, %r14, %r11;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_umax_i32(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umax_i32_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umax_i32_param_0];
+; CHECK-SM80-NEXT:    max.u32 %r9, %r4, %r8;
+; CHECK-SM80-NEXT:    max.u32 %r10, %r2, %r6;
+; CHECK-SM80-NEXT:    max.u32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT:    max.u32 %r12, %r3, %r7;
+; CHECK-SM80-NEXT:    max.u32 %r13, %r1, %r5;
+; CHECK-SM80-NEXT:    max.u32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT:    max.u32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_umax_i32(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [reduce_umax_i32_param_0+16];
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [reduce_umax_i32_param_0];
+; CHECK-SM100-NEXT:    mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM100-NEXT:    max.u32 %r5, %r4, %r2;
+; CHECK-SM100-NEXT:    mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM100-NEXT:    mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM100-NEXT:    max.u32 %r10, %r9, %r7;
+; CHECK-SM100-NEXT:    max.u32 %r11, %r10, %r5;
+; CHECK-SM100-NEXT:    max.u32 %r12, %r3, %r1;
+; CHECK-SM100-NEXT:    max.u32 %r13, %r8, %r6;
+; CHECK-SM100-NEXT:    max.u32 %r14, %r13, %r12;
+; CHECK-SM100-NEXT:    max.u32 %r15, %r14, %r11;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM100-NEXT:    ret;
   %res = call i32 @llvm.vector.reduce.umax(<8 x i32> %in)
   ret i32 %res
 }
@@ -1803,22 +1869,44 @@ define i16 @reduce_umin_i16_nonpow2(<7 x i16> %in) {
 }
 
 define i32 @reduce_umin_i32(<8 x i32> %in) {
-; CHECK-LABEL: reduce_umin_i32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0];
-; CHECK-NEXT:    min.u32 %r9, %r4, %r8;
-; CHECK-NEXT:    min.u32 %r10, %r2, %r6;
-; CHECK-NEXT:    min.u32 %r11, %r10, %r9;
-; CHECK-NEXT:    min.u32 %r12, %r3, %r7;
-; CHECK-NEXT:    min.u32 %r13, %r1, %r5;
-; CHECK-NEXT:    min.u32 %r14, %r13, %r12;
-; CHECK-NEXT:    min.u32 %r15, %r14, %r11;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_umin_i32(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_umin_i32_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_umin_i32_param_0];
+; CHECK-SM80-NEXT:    min.u32 %r9, %r4, %r8;
+; CHECK-SM80-NEXT:    min.u32 %r10, %r2, %r6;
+; CHECK-SM80-NEXT:    min.u32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT:    min.u32 %r12, %r3, %r7;
+; CHECK-SM80-NEXT:    min.u32 %r13, %r1, %r5;
+; CHECK-SM80-NEXT:    min.u32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT:    min.u32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_umin_i32(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [reduce_umin_i32_param_0+16];
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [reduce_umin_i32_param_0];
+; CHECK-SM100-NEXT:    mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM100-NEXT:    min.u32 %r5, %r4, %r2;
+; CHECK-SM100-NEXT:    mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM100-NEXT:    mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM100-NEXT:    min.u32 %r10, %r9, %r7;
+; CHECK-SM100-NEXT:    min.u32 %r11, %r10, %r5;
+; CHECK-SM100-NEXT:    min.u32 %r12, %r3, %r1;
+; CHECK-SM100-NEXT:    min.u32 %r13, %r8, %r6;
+; CHECK-SM100-NEXT:    min.u32 %r14, %r13, %r12;
+; CHECK-SM100-NEXT:    min.u32 %r15, %r14, %r11;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM100-NEXT:    ret;
   %res = call i32 @llvm.vector.reduce.umin(<8 x i32> %in)
   ret i32 %res
 }
@@ -1933,22 +2021,44 @@ define i16 @reduce_smax_i16_nonpow2(<7 x i16> %in) {
 }
 
 define i32 @reduce_smax_i32(<8 x i32> %in) {
-; CHECK-LABEL: reduce_smax_i32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0];
-; CHECK-NEXT:    max.s32 %r9, %r4, %r8;
-; CHECK-NEXT:    max.s32 %r10, %r2, %r6;
-; CHECK-NEXT:    max.s32 %r11, %r10, %r9;
-; CHECK-NEXT:    max.s32 %r12, %r3, %r7;
-; CHECK-NEXT:    max.s32 %r13, %r1, %r5;
-; CHECK-NEXT:    max.s32 %r14, %r13, %r12;
-; CHECK-NEXT:    max.s32 %r15, %r14, %r11;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_smax_i32(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smax_i32_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smax_i32_param_0];
+; CHECK-SM80-NEXT:    max.s32 %r9, %r4, %r8;
+; CHECK-SM80-NEXT:    max.s32 %r10, %r2, %r6;
+; CHECK-SM80-NEXT:    max.s32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT:    max.s32 %r12, %r3, %r7;
+; CHECK-SM80-NEXT:    max.s32 %r13, %r1, %r5;
+; CHECK-SM80-NEXT:    max.s32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT:    max.s32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_smax_i32(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [reduce_smax_i32_param_0+16];
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [reduce_smax_i32_param_0];
+; CHECK-SM100-NEXT:    mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM100-NEXT:    max.s32 %r5, %r4, %r2;
+; CHECK-SM100-NEXT:    mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM100-NEXT:    mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM100-NEXT:    max.s32 %r10, %r9, %r7;
+; CHECK-SM100-NEXT:    max.s32 %r11, %r10, %r5;
+; CHECK-SM100-NEXT:    max.s32 %r12, %r3, %r1;
+; CHECK-SM100-NEXT:    max.s32 %r13, %r8, %r6;
+; CHECK-SM100-NEXT:    max.s32 %r14, %r13, %r12;
+; CHECK-SM100-NEXT:    max.s32 %r15, %r14, %r11;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM100-NEXT:    ret;
   %res = call i32 @llvm.vector.reduce.smax(<8 x i32> %in)
   ret i32 %res
 }
@@ -2063,22 +2173,44 @@ define i16 @reduce_smin_i16_nonpow2(<7 x i16> %in) {
 }
 
 define i32 @reduce_smin_i32(<8 x i32> %in) {
-; CHECK-LABEL: reduce_smin_i32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0];
-; CHECK-NEXT:    min.s32 %r9, %r4, %r8;
-; CHECK-NEXT:    min.s32 %r10, %r2, %r6;
-; CHECK-NEXT:    min.s32 %r11, %r10, %r9;
-; CHECK-NEXT:    min.s32 %r12, %r3, %r7;
-; CHECK-NEXT:    min.s32 %r13, %r1, %r5;
-; CHECK-NEXT:    min.s32 %r14, %r13, %r12;
-; CHECK-NEXT:    min.s32 %r15, %r14, %r11;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_smin_i32(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_smin_i32_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_smin_i32_param_0];
+; CHECK-SM80-NEXT:    min.s32 %r9, %r4, %r8;
+; CHECK-SM80-NEXT:    min.s32 %r10, %r2, %r6;
+; CHECK-SM80-NEXT:    min.s32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT:    min.s32 %r12, %r3, %r7;
+; CHECK-SM80-NEXT:    min.s32 %r13, %r1, %r5;
+; CHECK-SM80-NEXT:    min.s32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT:    min.s32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_smin_i32(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [reduce_smin_i32_param_0+16];
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [reduce_smin_i32_param_0];
+; CHECK-SM100-NEXT:    mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM100-NEXT:    min.s32 %r5, %r4, %r2;
+; CHECK-SM100-NEXT:    mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM100-NEXT:    mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM100-NEXT:    min.s32 %r10, %r9, %r7;
+; CHECK-SM100-NEXT:    min.s32 %r11, %r10, %r5;
+; CHECK-SM100-NEXT:    min.s32 %r12, %r3, %r1;
+; CHECK-SM100-NEXT:    min.s32 %r13, %r8, %r6;
+; CHECK-SM100-NEXT:    min.s32 %r14, %r13, %r12;
+; CHECK-SM100-NEXT:    min.s32 %r15, %r14, %r11;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM100-NEXT:    ret;
   %res = call i32 @llvm.vector.reduce.smin(<8 x i32> %in)
   ret i32 %res
 }
@@ -2152,22 +2284,44 @@ define i16 @reduce_and_i16_nonpow2(<7 x i16> %in) {
 }
 
 define i32 @reduce_and_i32(<8 x i32> %in) {
-; CHECK-LABEL: reduce_and_i32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0];
-; CHECK-NEXT:    and.b32 %r9, %r4, %r8;
-; CHECK-NEXT:    and.b32 %r10, %r2, %r6;
-; CHECK-NEXT:    and.b32 %r11, %r10, %r9;
-; CHECK-NEXT:    and.b32 %r12, %r3, %r7;
-; CHECK-NEXT:    and.b32 %r13, %r1, %r5;
-; CHECK-NEXT:    and.b32 %r14, %r13, %r12;
-; CHECK-NEXT:    and.b32 %r15, %r14, %r11;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_and_i32(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_and_i32_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_and_i32_param_0];
+; CHECK-SM80-NEXT:    and.b32 %r9, %r4, %r8;
+; CHECK-SM80-NEXT:    and.b32 %r10, %r2, %r6;
+; CHECK-SM80-NEXT:    and.b32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT:    and.b32 %r12, %r3, %r7;
+; CHECK-SM80-NEXT:    and.b32 %r13, %r1, %r5;
+; CHECK-SM80-NEXT:    and.b32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT:    and.b32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_and_i32(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [reduce_and_i32_param_0+16];
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [reduce_and_i32_param_0];
+; CHECK-SM100-NEXT:    mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM100-NEXT:    and.b32 %r5, %r4, %r2;
+; CHECK-SM100-NEXT:    mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM100-NEXT:    mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM100-NEXT:    and.b32 %r10, %r9, %r7;
+; CHECK-SM100-NEXT:    and.b32 %r11, %r10, %r5;
+; CHECK-SM100-NEXT:    and.b32 %r12, %r3, %r1;
+; CHECK-SM100-NEXT:    and.b32 %r13, %r8, %r6;
+; CHECK-SM100-NEXT:    and.b32 %r14, %r13, %r12;
+; CHECK-SM100-NEXT:    and.b32 %r15, %r14, %r11;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM100-NEXT:    ret;
   %res = call i32 @llvm.vector.reduce.and(<8 x i32> %in)
   ret i32 %res
 }
@@ -2241,22 +2395,44 @@ define i16 @reduce_or_i16_nonpow2(<7 x i16> %in) {
 }
 
 define i32 @reduce_or_i32(<8 x i32> %in) {
-; CHECK-LABEL: reduce_or_i32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0];
-; CHECK-NEXT:    or.b32 %r9, %r4, %r8;
-; CHECK-NEXT:    or.b32 %r10, %r2, %r6;
-; CHECK-NEXT:    or.b32 %r11, %r10, %r9;
-; CHECK-NEXT:    or.b32 %r12, %r3, %r7;
-; CHECK-NEXT:    or.b32 %r13, %r1, %r5;
-; CHECK-NEXT:    or.b32 %r14, %r13, %r12;
-; CHECK-NEXT:    or.b32 %r15, %r14, %r11;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_or_i32(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_or_i32_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_or_i32_param_0];
+; CHECK-SM80-NEXT:    or.b32 %r9, %r4, %r8;
+; CHECK-SM80-NEXT:    or.b32 %r10, %r2, %r6;
+; CHECK-SM80-NEXT:    or.b32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT:    or.b32 %r12, %r3, %r7;
+; CHECK-SM80-NEXT:    or.b32 %r13, %r1, %r5;
+; CHECK-SM80-NEXT:    or.b32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT:    or.b32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_or_i32(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [reduce_or_i32_param_0+16];
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [reduce_or_i32_param_0];
+; CHECK-SM100-NEXT:    mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM100-NEXT:    or.b32 %r5, %r4, %r2;
+; CHECK-SM100-NEXT:    mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM100-NEXT:    mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM100-NEXT:    or.b32 %r10, %r9, %r7;
+; CHECK-SM100-NEXT:    or.b32 %r11, %r10, %r5;
+; CHECK-SM100-NEXT:    or.b32 %r12, %r3, %r1;
+; CHECK-SM100-NEXT:    or.b32 %r13, %r8, %r6;
+; CHECK-SM100-NEXT:    or.b32 %r14, %r13, %r12;
+; CHECK-SM100-NEXT:    or.b32 %r15, %r14, %r11;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM100-NEXT:    ret;
   %res = call i32 @llvm.vector.reduce.or(<8 x i32> %in)
   ret i32 %res
 }
@@ -2330,22 +2506,44 @@ define i16 @reduce_xor_i16_nonpow2(<7 x i16> %in) {
 }
 
 define i32 @reduce_xor_i32(<8 x i32> %in) {
-; CHECK-LABEL: reduce_xor_i32(
-; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<16>;
-; CHECK-EMPTY:
-; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16];
-; CHECK-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0];
-; CHECK-NEXT:    xor.b32 %r9, %r4, %r8;
-; CHECK-NEXT:    xor.b32 %r10, %r2, %r6;
-; CHECK-NEXT:    xor.b32 %r11, %r10, %r9;
-; CHECK-NEXT:    xor.b32 %r12, %r3, %r7;
-; CHECK-NEXT:    xor.b32 %r13, %r1, %r5;
-; CHECK-NEXT:    xor.b32 %r14, %r13, %r12;
-; CHECK-NEXT:    xor.b32 %r15, %r14, %r11;
-; CHECK-NEXT:    st.param.b32 [func_retval0], %r15;
-; CHECK-NEXT:    ret;
+; CHECK-SM80-LABEL: reduce_xor_i32(
+; CHECK-SM80:       {
+; CHECK-SM80-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM80-EMPTY:
+; CHECK-SM80-NEXT:  // %bb.0:
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r5, %r6, %r7, %r8}, [reduce_xor_i32_param_0+16];
+; CHECK-SM80-NEXT:    ld.param.v4.b32 {%r1, %r2, %r3, %r4}, [reduce_xor_i32_param_0];
+; CHECK-SM80-NEXT:    xor.b32 %r9, %r4, %r8;
+; CHECK-SM80-NEXT:    xor.b32 %r10, %r2, %r6;
+; CHECK-SM80-NEXT:    xor.b32 %r11, %r10, %r9;
+; CHECK-SM80-NEXT:    xor.b32 %r12, %r3, %r7;
+; CHECK-SM80-NEXT:    xor.b32 %r13, %r1, %r5;
+; CHECK-SM80-NEXT:    xor.b32 %r14, %r13, %r12;
+; CHECK-SM80-NEXT:    xor.b32 %r15, %r14, %r11;
+; CHECK-SM80-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM80-NEXT:    ret;
+;
+; CHECK-SM100-LABEL: reduce_xor_i32(
+; CHECK-SM100:       {
+; CHECK-SM100-NEXT:    .reg .b32 %r<16>;
+; CHECK-SM100-NEXT:    .reg .b64 %rd<5>;
+; CHECK-SM100-EMPTY:
+; CHECK-SM100-NEXT:  // %bb.0:
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd3, %rd4}, [reduce_xor_i32_param_0+16];
+; CHECK-SM100-NEXT:    ld.param.v2.b64 {%rd1, %rd2}, [reduce_xor_i32_param_0];
+; CHECK-SM100-NEXT:    mov.b64 {%r1, %r2}, %rd4;
+; CHECK-SM100-NEXT:    mov.b64 {%r3, %r4}, %rd2;
+; CHECK-SM100-NEXT:    xor.b32 %r5, %r4, %r2;
+; CHECK-SM100-NEXT:    mov.b64 {%r6, %r7}, %rd3;
+; CHECK-SM100-NEXT:    mov.b64 {%r8, %r9}, %rd1;
+; CHECK-SM100-NEXT:    xor.b32 %r10, %r9, %r7;
+; CHECK-SM100-NEXT:    xor.b32 %r11, %r10, %r5;
+; CHECK-SM100-NEXT:    xor.b32 %r12, %r3, %r1;
+; CHECK-SM100-NEXT:    xor.b32 %r13, %r8, %r6;
+; CHECK-SM100-NEXT:    xor.b32 %r14, %r13, %r12;
+; CHECK-SM100-NEXT:    xor.b32 %r15, %r14, %r11;
+; CHECK-SM100-NEXT:    st.param.b32 [func_retval0], %r15;
+; CHECK-SM100-NEXT:    ret;
   %res = call i32 @llvm.vector.reduce.xor(<8 x i32> %in)
   ret i32 %res
 }
