[llvm] 9f231a8 - [NVPTX] Prefer ValueType when defining DAG patterns (NFC) (#120161)

via llvm-commits <llvm-commits at lists.llvm.org>
Tue Dec 17 13:49:35 PST 2024


Author: Alex MacLean
Date: 2024-12-17T13:49:31-08:00
New Revision: 9f231a85004fad080980e80ef881c81d1d5bb60e

URL: https://github.com/llvm/llvm-project/commit/9f231a85004fad080980e80ef881c81d1d5bb60e
DIFF: https://github.com/llvm/llvm-project/commit/9f231a85004fad080980e80ef881c81d1d5bb60e.diff

LOG: [NVPTX] Prefer ValueType when defining DAG patterns (NFC) (#120161)

Replace uses of register classes in DAG patterns with value types. The
value types are more concise, and in cases where a single register class
maps to multiple types, they avoid having to spell out both the class and
the type.
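
As an illustrative sketch only (not part of this patch), the same kind of
pattern can be written both ways in TableGen; the EX_ADD32rr_* names below
are hypothetical and chosen just for the example, while NVPTXInst, Int32Regs,
and the add node are the ones used throughout the file:

// Register-class form: because Int32Regs can also hold v2i16/v4i8 values,
// an explicit (i32 ...) cast is often needed to disambiguate the pattern.
def EX_ADD32rr_rc :
  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
            "add.s32 \t$dst, $a, $b;",
            [(set Int32Regs:$dst, (add (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]>;

// Value-type form: the type is stated directly on each operand, which is
// shorter and unambiguous, so no extra cast is required.
def EX_ADD32rr_vt :
  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
            "add.s32 \t$dst, $a, $b;",
            [(set i32:$dst, (add i32:$a, i32:$b))]>;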

Added: 
    

Modified: 
    llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
    llvm/lib/Target/NVPTX/NVPTXIntrinsics.td

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index a7836ccc45f476..abaf8e0b0ec1f8 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -213,33 +213,33 @@ multiclass I3<string OpcStr, SDNode OpNode> {
   def i64rr :
     NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
               !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
-              [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+              [(set i64:$dst, (OpNode i64:$a, i64:$b))]>;
   def i64ri :
     NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
               !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
-              [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+              [(set i64:$dst, (OpNode i64:$a, imm:$b))]>;
   def i32rr :
     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
               !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
-              [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]>;
+              [(set i32:$dst, (OpNode i32:$a, i32:$b))]>;
   def i32ri :
     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
               !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
-              [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), imm:$b))]>;
+              [(set i32:$dst, (OpNode i32:$a, imm:$b))]>;
   def i16rr :
     NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
               !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
-              [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+              [(set i16:$dst, (OpNode i16:$a, i16:$b))]>;
   def i16ri :
     NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
               !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
-              [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (imm):$b))]>;
+              [(set i16:$dst, (OpNode i16:$a, (imm):$b))]>;
 }
 
 class I16x2<string OpcStr, SDNode OpNode> :
  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
               !strconcat(OpcStr, "16x2 \t$dst, $a, $b;"),
-              [(set Int32Regs:$dst, (OpNode (v2i16 Int32Regs:$a), (v2i16 Int32Regs:$b)))]>,
+              [(set v2i16:$dst, (OpNode v2i16:$a, v2i16:$b))]>,
               Requires<[hasPTX<80>, hasSM<90>]>;
 
 // Template for instructions which take 3 int args.  The instructions are
@@ -249,20 +249,20 @@ multiclass ADD_SUB_INT_CARRY<string OpcStr, SDNode OpNode> {
     def i32rr :
       NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
                 !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
-                [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]>;
+                [(set i32:$dst, (OpNode i32:$a, i32:$b))]>;
     def i32ri :
       NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
                 !strconcat(OpcStr, ".s32 \t$dst, $a, $b;"),
-                [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), imm:$b))]>;
+                [(set i32:$dst, (OpNode i32:$a, imm:$b))]>;
     def i64rr :
       NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
                 !strconcat(OpcStr, ".s64 \t$dst, $a, $b;"),
-                [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>,
+                [(set i64:$dst, (OpNode i64:$a, i64:$b))]>,
       Requires<[hasPTX<43>]>;
     def i64ri :
       NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
                 !strconcat(OpcStr, ".s64 \t$dst, $a, $b;"),
-                [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>,
+                [(set i64:$dst, (OpNode i64:$a, imm:$b))]>,
       Requires<[hasPTX<43>]>;
   }
 }
@@ -277,72 +277,72 @@ multiclass FMINIMUMMAXIMUM<string OpcStr, bit NaN, SDNode OpNode> {
      NVPTXInst<(outs Float64Regs:$dst),
                (ins Float64Regs:$a, Float64Regs:$b),
                !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
-               [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>;
+               [(set f64:$dst, (OpNode f64:$a, f64:$b))]>;
    def f64ri :
      NVPTXInst<(outs Float64Regs:$dst),
                (ins Float64Regs:$a, f64imm:$b),
                !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
-               [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>;
+               [(set f64:$dst, (OpNode f64:$a, fpimm:$b))]>;
   }
    def f32rr_ftz :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, Float32Regs:$b),
                !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, f32:$b))]>,
                Requires<[doF32FTZ]>;
    def f32ri_ftz :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, f32imm:$b),
                !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, fpimm:$b))]>,
                Requires<[doF32FTZ]>;
    def f32rr :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, Float32Regs:$b),
                !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>;
+               [(set f32:$dst, (OpNode f32:$a, f32:$b))]>;
    def f32ri :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, f32imm:$b),
                !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>;
+               [(set f32:$dst, (OpNode f32:$a, fpimm:$b))]>;
 
    def f16rr_ftz :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b)))]>,
+               [(set f16:$dst, (OpNode f16:$a, f16:$b))]>,
                Requires<[useFP16Math, doF32FTZ]>;
    def f16rr :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b)))]>,
+               [(set f16:$dst, (OpNode f16:$a, f16:$b))]>,
                Requires<[useFP16Math, hasSM<80>, hasPTX<70>]>;
 
    def f16x2rr_ftz :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (v2f16 Int32Regs:$a), (v2f16 Int32Regs:$b)))]>,
+               [(set v2f16:$dst, (OpNode v2f16:$a, v2f16:$b))]>,
                Requires<[useFP16Math, hasSM<80>, hasPTX<70>, doF32FTZ]>;
    def f16x2rr :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (v2f16 Int32Regs:$a), (v2f16 Int32Regs:$b)))]>,
+               [(set v2f16:$dst, (OpNode v2f16:$a, v2f16:$b))]>,
                Requires<[useFP16Math, hasSM<80>, hasPTX<70>]>;
    def bf16rr :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".bf16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (bf16 Int16Regs:$a), (bf16 Int16Regs:$b)))]>,
+               [(set bf16:$dst, (OpNode bf16:$a, bf16:$b))]>,
                Requires<[hasBF16Math, hasSM<80>, hasPTX<70>]>;
    def bf16x2rr :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".bf16x2 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (v2bf16 Int32Regs:$a), (v2bf16 Int32Regs:$b)))]>,
+               [(set v2bf16:$dst, (OpNode v2bf16:$a, v2bf16:$b))]>,
                Requires<[hasBF16Math, hasSM<80>, hasPTX<70>]>;
 }
 
@@ -360,161 +360,161 @@ multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
      NVPTXInst<(outs Float64Regs:$dst),
                (ins Float64Regs:$a, Float64Regs:$b),
                !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
-               [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+               [(set f64:$dst, (OpNode f64:$a, f64:$b))]>,
                Requires<[allowFMA]>;
    def f64ri :
      NVPTXInst<(outs Float64Regs:$dst),
                (ins Float64Regs:$a, f64imm:$b),
                !strconcat(OpcStr, ".f64 \t$dst, $a, $b;"),
-               [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+               [(set f64:$dst, (OpNode f64:$a, fpimm:$b))]>,
                Requires<[allowFMA]>;
    def f32rr_ftz :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, Float32Regs:$b),
                !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, f32:$b))]>,
                Requires<[allowFMA, doF32FTZ]>;
    def f32ri_ftz :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, f32imm:$b),
                !strconcat(OpcStr, ".ftz.f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, fpimm:$b))]>,
                Requires<[allowFMA, doF32FTZ]>;
    def f32rr :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, Float32Regs:$b),
                !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, f32:$b))]>,
                Requires<[allowFMA]>;
    def f32ri :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, f32imm:$b),
                !strconcat(OpcStr, ".f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, fpimm:$b))]>,
                Requires<[allowFMA]>;
 
    def f16rr_ftz :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".ftz.f16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b)))]>,
+               [(set f16:$dst, (OpNode f16:$a, f16:$b))]>,
                Requires<[useFP16Math, allowFMA, doF32FTZ]>;
    def f16rr :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".f16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b)))]>,
+               [(set f16:$dst, (OpNode f16:$a, f16:$b))]>,
                Requires<[useFP16Math, allowFMA]>;
 
    def f16x2rr_ftz :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a, $b;"),
-               [(set (v2f16 Int32Regs:$dst), (OpNode (v2f16 Int32Regs:$a), (v2f16 Int32Regs:$b)))]>,
+               [(set v2f16:$dst, (OpNode v2f16:$a, v2f16:$b))]>,
                Requires<[useFP16Math, allowFMA, doF32FTZ]>;
    def f16x2rr :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".f16x2 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (v2f16 Int32Regs:$a), (v2f16 Int32Regs:$b)))]>,
+               [(set v2f16:$dst, (OpNode v2f16:$a, v2f16:$b))]>,
                Requires<[useFP16Math, allowFMA]>;
    def bf16rr :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".bf16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (bf16 Int16Regs:$a), (bf16 Int16Regs:$b)))]>,
+               [(set bf16:$dst, (OpNode bf16:$a, bf16:$b))]>,
                Requires<[hasBF16Math, allowFMA]>;
 
    def bf16x2rr :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".bf16x2 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (v2bf16 Int32Regs:$a), (v2bf16 Int32Regs:$b)))]>,
+               [(set v2bf16:$dst, (OpNode v2bf16:$a, v2bf16:$b))]>,
                Requires<[hasBF16Math, allowFMA]>;
    // These have strange names so we don't perturb existing mir tests.
    def _rnf64rr :
      NVPTXInst<(outs Float64Regs:$dst),
                (ins Float64Regs:$a, Float64Regs:$b),
                !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
-               [(set Float64Regs:$dst, (OpNode Float64Regs:$a, Float64Regs:$b))]>,
+               [(set f64:$dst, (OpNode f64:$a, f64:$b))]>,
                Requires<[noFMA]>;
    def _rnf64ri :
      NVPTXInst<(outs Float64Regs:$dst),
                (ins Float64Regs:$a, f64imm:$b),
                !strconcat(OpcStr, ".rn.f64 \t$dst, $a, $b;"),
-               [(set Float64Regs:$dst, (OpNode Float64Regs:$a, fpimm:$b))]>,
+               [(set f64:$dst, (OpNode f64:$a, fpimm:$b))]>,
                Requires<[noFMA]>;
    def _rnf32rr_ftz :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, Float32Regs:$b),
                !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, Float32Regs:$b))]>,
                Requires<[noFMA, doF32FTZ]>;
    def _rnf32ri_ftz :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, f32imm:$b),
                !strconcat(OpcStr, ".rn.ftz.f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, fpimm:$b))]>,
                Requires<[noFMA, doF32FTZ]>;
    def _rnf32rr :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, Float32Regs:$b),
                !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, Float32Regs:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, f32:$b))]>,
                Requires<[noFMA]>;
    def _rnf32ri :
      NVPTXInst<(outs Float32Regs:$dst),
                (ins Float32Regs:$a, f32imm:$b),
                !strconcat(OpcStr, ".rn.f32 \t$dst, $a, $b;"),
-               [(set Float32Regs:$dst, (OpNode Float32Regs:$a, fpimm:$b))]>,
+               [(set f32:$dst, (OpNode f32:$a, fpimm:$b))]>,
                Requires<[noFMA]>;
    def _rnf16rr_ftz :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".rn.ftz.f16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b)))]>,
+               [(set f16:$dst, (OpNode f16:$a, f16:$b))]>,
                Requires<[useFP16Math, noFMA, doF32FTZ]>;
    def _rnf16rr :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".rn.f16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b)))]>,
+               [(set f16:$dst, (OpNode f16:$a, f16:$b))]>,
                Requires<[useFP16Math, noFMA]>;
    def _rnf16x2rr_ftz :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".rn.ftz.f16x2 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (v2f16 Int32Regs:$a), (v2f16 Int32Regs:$b)))]>,
+               [(set v2f16:$dst, (OpNode v2f16:$a, v2f16:$b))]>,
                Requires<[useFP16Math, noFMA, doF32FTZ]>;
    def _rnf16x2rr :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".rn.f16x2 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (v2f16 Int32Regs:$a), (v2f16 Int32Regs:$b)))]>,
+               [(set v2f16:$dst, (OpNode v2f16:$a, v2f16:$b))]>,
                Requires<[useFP16Math, noFMA]>;
   def _rnbf16rr_ftz :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".rn.ftz.bf16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (bf16 Int16Regs:$a), (bf16 Int16Regs:$b)))]>,
+               [(set bf16:$dst, (OpNode bf16:$a, bf16:$b))]>,
                Requires<[hasBF16Math, noFMA, doF32FTZ]>;
    def _rnbf16rr :
      NVPTXInst<(outs Int16Regs:$dst),
                (ins Int16Regs:$a, Int16Regs:$b),
                !strconcat(OpcStr, ".rn.bf16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode (bf16 Int16Regs:$a), (bf16 Int16Regs:$b)))]>,
+               [(set bf16:$dst, (OpNode bf16:$a, bf16:$b))]>,
                Requires<[hasBF16Math, noFMA]>;
    def _rnbf16x2rr_ftz :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".rn.ftz.bf16x2 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (v2bf16 Int32Regs:$a), (v2bf16 Int32Regs:$b)))]>,
+               [(set v2bf16:$dst, (OpNode v2bf16:$a, v2bf16:$b))]>,
                Requires<[hasBF16Math, noFMA, doF32FTZ]>;
    def _rnbf16x2rr :
      NVPTXInst<(outs Int32Regs:$dst),
                (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, ".rn.bf16x2 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (v2bf16 Int32Regs:$a), (v2bf16 Int32Regs:$b)))]>,
+               [(set v2bf16:$dst, (OpNode v2bf16:$a, v2bf16:$b))]>,
                Requires<[hasBF16Math, noFMA]>;
 }
 
@@ -524,40 +524,40 @@ multiclass F3_fma_component<string OpcStr, SDNode OpNode> {
 multiclass F2<string OpcStr, SDNode OpNode> {
    def f64 :     NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$a),
                            !strconcat(OpcStr, ".f64 \t$dst, $a;"),
-                           [(set Float64Regs:$dst, (OpNode Float64Regs:$a))]>;
+                           [(set f64:$dst, (OpNode f64:$a))]>;
    def f32_ftz : NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
                            !strconcat(OpcStr, ".ftz.f32 \t$dst, $a;"),
-                           [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>,
+                           [(set f32:$dst, (OpNode f32:$a))]>,
                            Requires<[doF32FTZ]>;
    def f32 :     NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$a),
                            !strconcat(OpcStr, ".f32 \t$dst, $a;"),
-                           [(set Float32Regs:$dst, (OpNode Float32Regs:$a))]>;
+                           [(set f32:$dst, (OpNode f32:$a))]>;
 }
 
 multiclass F2_Support_Half<string OpcStr, SDNode OpNode> {
    def bf16 :      NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a),
                            !strconcat(OpcStr, ".bf16 \t$dst, $a;"),
-                           [(set Int16Regs:$dst, (OpNode (bf16 Int16Regs:$a)))]>,
+                           [(set bf16:$dst, (OpNode bf16:$a))]>,
                            Requires<[hasSM<80>, hasPTX<70>]>;
    def bf16x2 :    NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
                            !strconcat(OpcStr, ".bf16x2 \t$dst, $a;"),
-                           [(set Int32Regs:$dst, (OpNode (v2bf16 Int32Regs:$a)))]>,
+                           [(set v2bf16:$dst, (OpNode v2bf16:$a))]>,
                            Requires<[hasSM<80>, hasPTX<70>]>;
    def f16_ftz :   NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a),
                            !strconcat(OpcStr, ".ftz.f16 \t$dst, $a;"),
-                           [(set Int16Regs:$dst, (OpNode (f16 Int16Regs:$a)))]>,
+                           [(set f16:$dst, (OpNode f16:$a))]>,
                            Requires<[hasSM<53>, hasPTX<65>, doF32FTZ]>;
    def f16x2_ftz : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
                            !strconcat(OpcStr, ".ftz.f16x2 \t$dst, $a;"),
-                           [(set Int32Regs:$dst, (OpNode (v2f16 Int32Regs:$a)))]>,
+                           [(set v2f16:$dst, (OpNode v2f16:$a))]>,
                            Requires<[hasSM<53>, hasPTX<65>, doF32FTZ]>;
    def f16 :       NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a),
                            !strconcat(OpcStr, ".f16 \t$dst, $a;"),
-                           [(set Int16Regs:$dst, (OpNode (f16 Int16Regs:$a)))]>,
+                           [(set f16:$dst, (OpNode f16:$a))]>,
                            Requires<[hasSM<53>, hasPTX<65>]>;
    def f16x2 :     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
                            !strconcat(OpcStr, ".f16x2 \t$dst, $a;"),
-                           [(set Int32Regs:$dst, (OpNode (v2f16 Int32Regs:$a)))]>,
+                           [(set v2f16:$dst, (OpNode v2f16:$a))]>,
                            Requires<[hasSM<53>, hasPTX<65>]>;
 
 }
@@ -731,13 +731,13 @@ def fpround_oneuse : PatFrag<(ops node:$a), (fpround node:$a), [{
   return N->hasOneUse();
 }]>;
 
-def : Pat<(v2bf16 (build_vector (bf16 (fpround_oneuse Float32Regs:$lo)),
-                                (bf16 (fpround_oneuse Float32Regs:$hi)))),
+def : Pat<(v2bf16 (build_vector (bf16 (fpround_oneuse f32:$lo)),
+                                (bf16 (fpround_oneuse f32:$hi)))),
           (CVT_bf16x2_f32 Float32Regs:$hi, Float32Regs:$lo, CvtRN)>,
       Requires<[hasPTX<70>, hasSM<80>, hasBF16Math]>;
 
-def : Pat<(v2f16 (build_vector (f16 (fpround_oneuse Float32Regs:$lo)),
-                               (f16 (fpround_oneuse Float32Regs:$hi)))),
+def : Pat<(v2f16 (build_vector (f16 (fpround_oneuse f32:$lo)),
+                               (f16 (fpround_oneuse f32:$hi)))),
           (CVT_f16x2_f32 Float32Regs:$hi, Float32Regs:$lo, CvtRN)>,
       Requires<[hasPTX<70>, hasSM<80>, useFP16Math]>;
 
@@ -771,22 +771,22 @@ let hasSideEffects = false in {
       NVPTXInst<(outs RC:$dst),
                 (ins RC:$a, RC:$b, Int1Regs:$p),
                 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
-                [(set (T RC:$dst), (select Int1Regs:$p, (T RC:$a), (T RC:$b)))]>;
+                [(set T:$dst, (select i1:$p, T:$a, T:$b))]>;
     def ri :
       NVPTXInst<(outs RC:$dst),
                 (ins RC:$a, ImmCls:$b, Int1Regs:$p),
                 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
-                [(set (T RC:$dst), (select Int1Regs:$p, (T RC:$a), (T ImmNode:$b)))]>;
+                [(set T:$dst, (select i1:$p, T:$a, (T ImmNode:$b)))]>;
     def ir :
       NVPTXInst<(outs RC:$dst),
                 (ins ImmCls:$a, RC:$b, Int1Regs:$p),
                 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
-                [(set (T RC:$dst), (select Int1Regs:$p, ImmNode:$a, (T RC:$b)))]>;
+                [(set T:$dst, (select i1:$p, ImmNode:$a, T:$b))]>;
     def ii :
       NVPTXInst<(outs RC:$dst),
                 (ins ImmCls:$a, ImmCls:$b, Int1Regs:$p),
                 !strconcat("selp.", TypeStr, " \t$dst, $a, $b, $p;"),
-                [(set (T RC:$dst), (select Int1Regs:$p, ImmNode:$a, ImmNode:$b))]>;
+                [(set T:$dst, (select i1:$p, ImmNode:$a, ImmNode:$b))]>;
   }
 }
 
@@ -812,7 +812,7 @@ defm SELP_f64 : SELP_PATTERN<"f64", f64, Float64Regs, f64imm, fpimm>;
 // defm SELP_f16x2 : SELP_PATTERN<"b32", v2f16, Int32Regs, v2f16imm, imm>;
 
 foreach vt = [v2f16, v2bf16, v2i16, v4i8] in {
-def : Pat<(vt (select Int1Regs:$p, (vt Int32Regs:$a), (vt Int32Regs:$b))),
+def : Pat<(vt (select i1:$p, vt:$a, vt:$b)),
           (SELP_b32rr Int32Regs:$a, Int32Regs:$b, Int1Regs:$p)>;
 }
 
@@ -841,10 +841,10 @@ def TESTINF_f64i : NVPTXInst<(outs Int1Regs:$p), (ins f64imm:$a),
 multiclass ADD_SUB_i1<SDNode OpNode> {
    def _rr: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
                       "xor.pred \t$dst, $a, $b;",
-                      [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+                      [(set i1:$dst, (OpNode i1:$a, i1:$b))]>;
    def _ri: NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
                       "xor.pred \t$dst, $a, $b;",
-                      [(set Int1Regs:$dst, (OpNode Int1Regs:$a, (imm):$b))]>;
+                      [(set i1:$dst, (OpNode i1:$a, (imm):$b))]>;
 }
 
 // int1 addition and subtraction are both just xor.
@@ -885,7 +885,7 @@ defm UREM : I3<"rem.u", urem>;
 multiclass ABS<ValueType T, RegisterClass RC, string SizeName> {
   def : NVPTXInst<(outs RC:$dst), (ins RC:$a),
                   !strconcat("abs", SizeName, " \t$dst, $a;"),
-                  [(set (T RC:$dst), (abs (T RC:$a)))]>;
+                  [(set T:$dst, (abs T:$a))]>;
 }
 defm ABS_16 : ABS<i16, Int16Regs, ".s16">;
 defm ABS_32 : ABS<i32, Int32Regs, ".s32">;
@@ -954,26 +954,26 @@ def mul_wide_unsigned : SDNode<"NVPTXISD::MUL_WIDE_UNSIGNED", SDTMulWide>;
 def : Pat<(i32 (mul_wide_signed i16:$a, i16:$b)),
           (MULWIDES32 i16:$a, i16:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_signed Int16Regs:$a, imm:$b)),
+def : Pat<(i32 (mul_wide_signed i16:$a, imm:$b)),
           (MULWIDES32Imm Int16Regs:$a, imm:$b)>,
       Requires<[doMulWide]>;
 def : Pat<(i32 (mul_wide_unsigned i16:$a, i16:$b)),
           (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(i32 (mul_wide_unsigned Int16Regs:$a, imm:$b)),
+def : Pat<(i32 (mul_wide_unsigned i16:$a, imm:$b)),
           (MULWIDEU32Imm Int16Regs:$a, imm:$b)>,
       Requires<[doMulWide]>;
 
 def : Pat<(i64 (mul_wide_signed i32:$a, i32:$b)),
           (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_signed (i32 Int32Regs:$a), imm:$b)),
+def : Pat<(i64 (mul_wide_signed i32:$a, imm:$b)),
           (MULWIDES64Imm Int32Regs:$a, imm:$b)>,
       Requires<[doMulWide]>;
 def : Pat<(i64 (mul_wide_unsigned i32:$a, i32:$b)),
           (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(i64 (mul_wide_unsigned (i32 Int32Regs:$a), imm:$b)),
+def : Pat<(i64 (mul_wide_unsigned i32:$a, imm:$b)),
           (MULWIDEU64Imm Int32Regs:$a, imm:$b)>,
       Requires<[doMulWide]>;
 
@@ -1023,46 +1023,46 @@ def SHL2MUL16 : SDNodeXForm<imm, [{
 }]>;
 
 // Convert "sign/zero-extend, then shift left by an immediate" to mul.wide.
-def : Pat<(shl (sext Int32Regs:$a), (i32 IntConst_0_30:$b)),
+def : Pat<(shl (sext i32:$a), (i32 IntConst_0_30:$b)),
           (MULWIDES64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
       Requires<[doMulWide]>;
-def : Pat<(shl (zext Int32Regs:$a), (i32 IntConst_0_30:$b)),
+def : Pat<(shl (zext i32:$a), (i32 IntConst_0_30:$b)),
           (MULWIDEU64Imm Int32Regs:$a, (SHL2MUL32 node:$b))>,
       Requires<[doMulWide]>;
 
-def : Pat<(shl (sext Int16Regs:$a), (i16 IntConst_0_14:$b)),
+def : Pat<(shl (sext i16:$a), (i16 IntConst_0_14:$b)),
           (MULWIDES32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
       Requires<[doMulWide]>;
-def : Pat<(shl (zext Int16Regs:$a), (i16 IntConst_0_14:$b)),
+def : Pat<(shl (zext i16:$a), (i16 IntConst_0_14:$b)),
           (MULWIDEU32Imm Int16Regs:$a, (SHL2MUL16 node:$b))>,
       Requires<[doMulWide]>;
 
 // Convert "sign/zero-extend then multiply" to mul.wide.
-def : Pat<(mul (sext Int32Regs:$a), (sext Int32Regs:$b)),
+def : Pat<(mul (sext i32:$a), (sext i32:$b)),
           (MULWIDES64 Int32Regs:$a, Int32Regs:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(mul (sext Int32Regs:$a), (i64 SInt32Const:$b)),
+def : Pat<(mul (sext i32:$a), (i64 SInt32Const:$b)),
           (MULWIDES64Imm64 Int32Regs:$a, (i64 SInt32Const:$b))>,
       Requires<[doMulWide]>;
 
-def : Pat<(mul (zext Int32Regs:$a), (zext Int32Regs:$b)),
+def : Pat<(mul (zext i32:$a), (zext i32:$b)),
           (MULWIDEU64 Int32Regs:$a, Int32Regs:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(mul (zext Int32Regs:$a), (i64 UInt32Const:$b)),
+def : Pat<(mul (zext i32:$a), (i64 UInt32Const:$b)),
           (MULWIDEU64Imm64 Int32Regs:$a, (i64 UInt32Const:$b))>,
       Requires<[doMulWide]>;
 
-def : Pat<(mul (sext Int16Regs:$a), (sext Int16Regs:$b)),
+def : Pat<(mul (sext i16:$a), (sext i16:$b)),
           (MULWIDES32 Int16Regs:$a, Int16Regs:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(mul (sext Int16Regs:$a), (i32 SInt16Const:$b)),
+def : Pat<(mul (sext i16:$a), (i32 SInt16Const:$b)),
           (MULWIDES32Imm32 Int16Regs:$a, (i32 SInt16Const:$b))>,
       Requires<[doMulWide]>;
 
-def : Pat<(mul (zext Int16Regs:$a), (zext Int16Regs:$b)),
+def : Pat<(mul (zext i16:$a), (zext i16:$b)),
           (MULWIDEU32 Int16Regs:$a, Int16Regs:$b)>,
       Requires<[doMulWide]>;
-def : Pat<(mul (zext Int16Regs:$a), (i32 UInt16Const:$b)),
+def : Pat<(mul (zext i16:$a), (i32 UInt16Const:$b)),
           (MULWIDEU32Imm32 Int16Regs:$a, (i32 UInt16Const:$b))>,
       Requires<[doMulWide]>;
 
@@ -1078,77 +1078,77 @@ def MAD16rrr :
   NVPTXInst<(outs Int16Regs:$dst),
             (ins Int16Regs:$a, Int16Regs:$b, Int16Regs:$c),
             "mad.lo.s16 \t$dst, $a, $b, $c;",
-            [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, Int16Regs:$c))]>;
+            [(set i16:$dst, (imad i16:$a, i16:$b, i16:$c))]>;
 def MAD16rri :
   NVPTXInst<(outs Int16Regs:$dst),
             (ins Int16Regs:$a, Int16Regs:$b, i16imm:$c),
             "mad.lo.s16 \t$dst, $a, $b, $c;",
-            [(set Int16Regs:$dst, (imad Int16Regs:$a, Int16Regs:$b, imm:$c))]>;
+            [(set i16:$dst, (imad i16:$a, i16:$b, imm:$c))]>;
 def MAD16rir :
   NVPTXInst<(outs Int16Regs:$dst),
             (ins Int16Regs:$a, i16imm:$b, Int16Regs:$c),
             "mad.lo.s16 \t$dst, $a, $b, $c;",
-            [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, Int16Regs:$c))]>;
+            [(set i16:$dst, (imad i16:$a, imm:$b, i16:$c))]>;
 def MAD16rii :
   NVPTXInst<(outs Int16Regs:$dst),
             (ins Int16Regs:$a, i16imm:$b, i16imm:$c),
             "mad.lo.s16 \t$dst, $a, $b, $c;",
-            [(set Int16Regs:$dst, (imad Int16Regs:$a, imm:$b, imm:$c))]>;
+            [(set i16:$dst, (imad i16:$a, imm:$b, imm:$c))]>;
 
 def MAD32rrr :
   NVPTXInst<(outs Int32Regs:$dst),
             (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
             "mad.lo.s32 \t$dst, $a, $b, $c;",
-            [(set (i32 Int32Regs:$dst), (imad (i32 Int32Regs:$a), (i32 Int32Regs:$b), (i32 Int32Regs:$c)))]>;
+            [(set i32:$dst, (imad i32:$a, i32:$b, i32:$c))]>;
 def MAD32rri :
   NVPTXInst<(outs Int32Regs:$dst),
             (ins Int32Regs:$a, Int32Regs:$b, i32imm:$c),
             "mad.lo.s32 \t$dst, $a, $b, $c;",
-            [(set (i32 Int32Regs:$dst), (imad (i32 Int32Regs:$a), (i32 Int32Regs:$b), imm:$c))]>;
+            [(set i32:$dst, (imad i32:$a, i32:$b, imm:$c))]>;
 def MAD32rir :
   NVPTXInst<(outs Int32Regs:$dst),
             (ins Int32Regs:$a, i32imm:$b, Int32Regs:$c),
             "mad.lo.s32 \t$dst, $a, $b, $c;",
-            [(set (i32 Int32Regs:$dst), (imad (i32 Int32Regs:$a), imm:$b, (i32 Int32Regs:$c)))]>;
+            [(set i32:$dst, (imad i32:$a, imm:$b, i32:$c))]>;
 def MAD32rii :
   NVPTXInst<(outs Int32Regs:$dst),
             (ins Int32Regs:$a, i32imm:$b, i32imm:$c),
             "mad.lo.s32 \t$dst, $a, $b, $c;",
-            [(set (i32 Int32Regs:$dst), (imad (i32 Int32Regs:$a), imm:$b, imm:$c))]>;
+            [(set i32:$dst, (imad i32:$a, imm:$b, imm:$c))]>;
 
 def MAD64rrr :
   NVPTXInst<(outs Int64Regs:$dst),
             (ins Int64Regs:$a, Int64Regs:$b, Int64Regs:$c),
             "mad.lo.s64 \t$dst, $a, $b, $c;",
-            [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, Int64Regs:$c))]>;
+            [(set i64:$dst, (imad i64:$a, i64:$b, i64:$c))]>;
 def MAD64rri :
   NVPTXInst<(outs Int64Regs:$dst),
             (ins Int64Regs:$a, Int64Regs:$b, i64imm:$c),
             "mad.lo.s64 \t$dst, $a, $b, $c;",
-            [(set Int64Regs:$dst, (imad Int64Regs:$a, Int64Regs:$b, imm:$c))]>;
+            [(set i64:$dst, (imad i64:$a, i64:$b, imm:$c))]>;
 def MAD64rir :
   NVPTXInst<(outs Int64Regs:$dst),
             (ins Int64Regs:$a, i64imm:$b, Int64Regs:$c),
             "mad.lo.s64 \t$dst, $a, $b, $c;",
-            [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, Int64Regs:$c))]>;
+            [(set i64:$dst, (imad i64:$a, imm:$b, i64:$c))]>;
 def MAD64rii :
   NVPTXInst<(outs Int64Regs:$dst),
             (ins Int64Regs:$a, i64imm:$b, i64imm:$c),
             "mad.lo.s64 \t$dst, $a, $b, $c;",
-            [(set Int64Regs:$dst, (imad Int64Regs:$a, imm:$b, imm:$c))]>;
+            [(set i64:$dst, (imad i64:$a, imm:$b, imm:$c))]>;
 
 def INEG16 :
   NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
             "neg.s16 \t$dst, $src;",
-            [(set Int16Regs:$dst, (ineg Int16Regs:$src))]>;
+            [(set i16:$dst, (ineg i16:$src))]>;
 def INEG32 :
   NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
             "neg.s32 \t$dst, $src;",
-            [(set (i32 Int32Regs:$dst), (ineg (i32 Int32Regs:$src)))]>;
+            [(set i32:$dst, (ineg i32:$src))]>;
 def INEG64 :
   NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
             "neg.s64 \t$dst, $src;",
-            [(set Int64Regs:$dst, (ineg Int64Regs:$src))]>;
+            [(set i64:$dst, (ineg i64:$src))]>;
 
 //-----------------------------------
 // Floating Point Arithmetic
@@ -1210,7 +1210,7 @@ defm FSQRT : F2<"sqrt.rn", fsqrt>;
 class FNEG_F16_F16X2<string OpcStr, ValueType T, RegisterClass RC, Predicate Pred> :
       NVPTXInst<(outs RC:$dst), (ins RC:$src),
                 !strconcat(OpcStr, " \t$dst, $src;"),
-                [(set RC:$dst, (fneg (T RC:$src)))]>,
+                [(set T:$dst, (fneg T:$src))]>,
                 Requires<[useFP16Math, hasPTX<60>, hasSM<53>, Pred]>;
 def FNEG16_ftz   : FNEG_F16_F16X2<"neg.ftz.f16", f16, Int16Regs, doF32FTZ>;
 def FNEG16       : FNEG_F16_F16X2<"neg.f16", f16, Int16Regs, True>;
@@ -1224,7 +1224,7 @@ def FNEG16x2     : FNEG_F16_F16X2<"neg.f16x2", v2f16, Int32Regs, True>;
 class FNEG_BF16_F16X2<string OpcStr, ValueType T, RegisterClass RC, Predicate Pred> :
       NVPTXInst<(outs RC:$dst), (ins RC:$src),
                 !strconcat(OpcStr, " \t$dst, $src;"),
-                [(set RC:$dst, (fneg (T RC:$src)))]>,
+                [(set T:$dst, (fneg T:$src))]>,
                 Requires<[hasBF16Math, hasPTX<70>, hasSM<80>, Pred]>;
 def BFNEG16_ftz   : FNEG_BF16_F16X2<"neg.ftz.bf16", bf16, Int16Regs, doF32FTZ>;
 def BFNEG16       : FNEG_BF16_F16X2<"neg.bf16", bf16, Int16Regs, True>;
@@ -1238,21 +1238,21 @@ def FDIV641r :
   NVPTXInst<(outs Float64Regs:$dst),
             (ins f64imm:$a, Float64Regs:$b),
             "rcp.rn.f64 \t$dst, $b;",
-            [(set Float64Regs:$dst, (fdiv DoubleConst1:$a, Float64Regs:$b))]>;
+            [(set f64:$dst, (fdiv DoubleConst1:$a, f64:$b))]>;
 def FDIV64rr :
   NVPTXInst<(outs Float64Regs:$dst),
             (ins Float64Regs:$a, Float64Regs:$b),
             "div.rn.f64 \t$dst, $a, $b;",
-            [(set Float64Regs:$dst, (fdiv Float64Regs:$a, Float64Regs:$b))]>;
+            [(set f64:$dst, (fdiv f64:$a, f64:$b))]>;
 def FDIV64ri :
   NVPTXInst<(outs Float64Regs:$dst),
             (ins Float64Regs:$a, f64imm:$b),
             "div.rn.f64 \t$dst, $a, $b;",
-            [(set Float64Regs:$dst, (fdiv Float64Regs:$a, fpimm:$b))]>;
+            [(set f64:$dst, (fdiv f64:$a, fpimm:$b))]>;
 
 // fdiv will be converted to rcp
 // fneg (fdiv 1.0, X) => fneg (rcp.rn X)
-def : Pat<(fdiv DoubleConstNeg1:$a, Float64Regs:$b),
+def : Pat<(fdiv DoubleConstNeg1:$a, f64:$b),
           (FNEGf64 (FDIV641r (NegDoubleConst node:$a), Float64Regs:$b))>;
 
 //
@@ -1262,13 +1262,13 @@ def FDIV321r_ftz :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins f32imm:$a, Float32Regs:$b),
             "rcp.approx.ftz.f32 \t$dst, $b;",
-            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv FloatConst1:$a, f32:$b))]>,
             Requires<[do_DIVF32_APPROX, doF32FTZ]>;
 def FDIV321r :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins f32imm:$a, Float32Regs:$b),
             "rcp.approx.f32 \t$dst, $b;",
-            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv FloatConst1:$a, f32:$b))]>,
             Requires<[do_DIVF32_APPROX]>;
 //
 // F32 Approximate division
@@ -1277,25 +1277,25 @@ def FDIV32approxrr_ftz :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, Float32Regs:$b),
             "div.approx.ftz.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv f32:$a, f32:$b))]>,
             Requires<[do_DIVF32_APPROX, doF32FTZ]>;
 def FDIV32approxri_ftz :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, f32imm:$b),
             "div.approx.ftz.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            [(set f32:$dst, (fdiv f32:$a, fpimm:$b))]>,
             Requires<[do_DIVF32_APPROX, doF32FTZ]>;
 def FDIV32approxrr :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, Float32Regs:$b),
             "div.approx.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv f32:$a, f32:$b))]>,
             Requires<[do_DIVF32_APPROX]>;
 def FDIV32approxri :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, f32imm:$b),
             "div.approx.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            [(set f32:$dst, (fdiv f32:$a, fpimm:$b))]>,
             Requires<[do_DIVF32_APPROX]>;
 //
 // F32 Semi-accurate reciprocal
@@ -1306,13 +1306,13 @@ def FDIV321r_approx_ftz :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins f32imm:$a, Float32Regs:$b),
             "rcp.approx.ftz.f32 \t$dst, $b;",
-            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv FloatConst1:$a, f32:$b))]>,
             Requires<[do_DIVF32_FULL, doF32FTZ]>;
 def FDIV321r_approx :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins f32imm:$a, Float32Regs:$b),
             "rcp.approx.f32 \t$dst, $b;",
-            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv FloatConst1:$a, f32:$b))]>,
             Requires<[do_DIVF32_FULL]>;
 //
 // F32 Semi-accurate division
@@ -1321,25 +1321,25 @@ def FDIV32rr_ftz :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, Float32Regs:$b),
             "div.full.ftz.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv Float32Regs:$a, f32:$b))]>,
             Requires<[do_DIVF32_FULL, doF32FTZ]>;
 def FDIV32ri_ftz :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, f32imm:$b),
             "div.full.ftz.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            [(set f32:$dst, (fdiv f32:$a, fpimm:$b))]>,
             Requires<[do_DIVF32_FULL, doF32FTZ]>;
 def FDIV32rr :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, Float32Regs:$b),
             "div.full.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv f32:$a, f32:$b))]>,
             Requires<[do_DIVF32_FULL]>;
 def FDIV32ri :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, f32imm:$b),
             "div.full.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            [(set f32:$dst, (fdiv f32:$a, fpimm:$b))]>,
             Requires<[do_DIVF32_FULL]>;
 //
 // F32 Accurate reciprocal
@@ -1348,13 +1348,13 @@ def FDIV321r_prec_ftz :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins f32imm:$a, Float32Regs:$b),
             "rcp.rn.ftz.f32 \t$dst, $b;",
-            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv FloatConst1:$a, f32:$b))]>,
             Requires<[doF32FTZ]>;
 def FDIV321r_prec :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins f32imm:$a, Float32Regs:$b),
             "rcp.rn.f32 \t$dst, $b;",
-            [(set Float32Regs:$dst, (fdiv FloatConst1:$a, Float32Regs:$b))]>;
+            [(set f32:$dst, (fdiv FloatConst1:$a, f32:$b))]>;
 //
 // F32 Accurate division
 //
@@ -1362,24 +1362,24 @@ def FDIV32rr_prec_ftz :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, Float32Regs:$b),
             "div.rn.ftz.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>,
+            [(set f32:$dst, (fdiv f32:$a, f32:$b))]>,
             Requires<[doF32FTZ]>;
 def FDIV32ri_prec_ftz :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, f32imm:$b),
             "div.rn.ftz.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>,
+            [(set f32:$dst, (fdiv f32:$a, fpimm:$b))]>,
             Requires<[doF32FTZ]>;
 def FDIV32rr_prec :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, Float32Regs:$b),
             "div.rn.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, Float32Regs:$b))]>;
+            [(set f32:$dst, (fdiv f32:$a, f32:$b))]>;
 def FDIV32ri_prec :
   NVPTXInst<(outs Float32Regs:$dst),
             (ins Float32Regs:$a, f32imm:$b),
             "div.rn.f32 \t$dst, $a, $b;",
-            [(set Float32Regs:$dst, (fdiv Float32Regs:$a, fpimm:$b))]>;
+            [(set f32:$dst, (fdiv f32:$a, fpimm:$b))]>;
 
 //
 // FMA
@@ -1410,14 +1410,14 @@ multiclass FMA<string OpcStr, RegisterClass RC, Operand ImmCls, Predicate Pred>
 multiclass FMA_F16<string OpcStr, ValueType T, RegisterClass RC, Predicate Pred> {
    def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
                        !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
-                       [(set RC:$dst, (fma (T RC:$a), (T RC:$b), (T RC:$c)))]>,
+                       [(set T:$dst, (fma T:$a, T:$b, T:$c))]>,
                        Requires<[useFP16Math, Pred]>;
 }
 
 multiclass FMA_BF16<string OpcStr, ValueType T, RegisterClass RC, Predicate Pred> {
    def rrr : NVPTXInst<(outs RC:$dst), (ins RC:$a, RC:$b, RC:$c),
                        !strconcat(OpcStr, " \t$dst, $a, $b, $c;"),
-                       [(set RC:$dst, (fma (T RC:$a), (T RC:$b), (T RC:$c)))]>,
+                       [(set T:$dst, (fma T:$a, T:$b, T:$c))]>,
                        Requires<[hasBF16Math, Pred]>;
 }
 
@@ -1434,11 +1434,11 @@ defm FMA64        : FMA<"fma.rn.f64", Float64Regs, f64imm, True>;
 // sin/cos
 def SINF:  NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
                       "sin.approx.f32 \t$dst, $src;",
-                      [(set Float32Regs:$dst, (fsin Float32Regs:$src))]>,
+                      [(set f32:$dst, (fsin f32:$src))]>,
                       Requires<[allowUnsafeFPMath]>;
 def COSF:  NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
                       "cos.approx.f32 \t$dst, $src;",
-                      [(set Float32Regs:$dst, (fcos Float32Regs:$src))]>,
+                      [(set f32:$dst, (fcos f32:$src))]>,
                       Requires<[allowUnsafeFPMath]>;
 
 // Lower (frem x, y) into (sub x, (mul (ftrunc (div x, y)) y)),
@@ -1446,25 +1446,25 @@ def COSF:  NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src),
 // semantics of LLVM's frem.
 
 // frem - f32 FTZ
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+def : Pat<(frem f32:$x, f32:$y),
           (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
             (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRZI_FTZ),
              Float32Regs:$y))>,
           Requires<[doF32FTZ, allowUnsafeFPMath]>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
+def : Pat<(frem f32:$x, fpimm:$y),
           (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
             (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRZI_FTZ),
              fpimm:$y))>,
           Requires<[doF32FTZ, allowUnsafeFPMath]>;
 
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+def : Pat<(frem f32:$x, Float32Regs:$y),
           (SELP_f32rr Float32Regs:$x,
             (FSUBf32rr_ftz Float32Regs:$x, (FMULf32rr_ftz (CVT_f32_f32
               (FDIV32rr_prec_ftz Float32Regs:$x, Float32Regs:$y), CvtRZI_FTZ),
               Float32Regs:$y)),
             (TESTINF_f32r Float32Regs:$y))>,
           Requires<[doF32FTZ, noUnsafeFPMath]>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
+def : Pat<(frem f32:$x, fpimm:$y),
           (SELP_f32rr Float32Regs:$x,
             (FSUBf32rr_ftz Float32Regs:$x, (FMULf32ri_ftz (CVT_f32_f32
               (FDIV32ri_prec_ftz Float32Regs:$x, fpimm:$y), CvtRZI_FTZ),
@@ -1473,25 +1473,25 @@ def : Pat<(frem Float32Regs:$x, fpimm:$y),
           Requires<[doF32FTZ, noUnsafeFPMath]>;
 
 // frem - f32
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+def : Pat<(frem f32:$x, f32:$y),
           (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
             (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRZI),
              Float32Regs:$y))>,
           Requires<[allowUnsafeFPMath]>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
+def : Pat<(frem f32:$x, fpimm:$y),
           (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
             (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRZI),
              fpimm:$y))>,
           Requires<[allowUnsafeFPMath]>;
 
-def : Pat<(frem Float32Regs:$x, Float32Regs:$y),
+def : Pat<(frem f32:$x, f32:$y),
           (SELP_f32rr Float32Regs:$x,
             (FSUBf32rr Float32Regs:$x, (FMULf32rr (CVT_f32_f32
               (FDIV32rr_prec Float32Regs:$x, Float32Regs:$y), CvtRZI),
               Float32Regs:$y)),
             (TESTINF_f32r Float32Regs:$y))>,
           Requires<[noUnsafeFPMath]>;
-def : Pat<(frem Float32Regs:$x, fpimm:$y),
+def : Pat<(frem f32:$x, fpimm:$y),
           (SELP_f32rr Float32Regs:$x,
             (FSUBf32rr Float32Regs:$x, (FMULf32ri (CVT_f32_f32
               (FDIV32ri_prec Float32Regs:$x, fpimm:$y), CvtRZI),
@@ -1500,25 +1500,25 @@ def : Pat<(frem Float32Regs:$x, fpimm:$y),
           Requires<[noUnsafeFPMath]>;
 
 // frem - f64
-def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
+def : Pat<(frem f64:$x, f64:$y),
           (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
             (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRZI),
              Float64Regs:$y))>,
           Requires<[allowUnsafeFPMath]>;
-def : Pat<(frem Float64Regs:$x, fpimm:$y),
+def : Pat<(frem f64:$x, fpimm:$y),
           (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
             (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRZI),
              fpimm:$y))>,
           Requires<[allowUnsafeFPMath]>;
 
-def : Pat<(frem Float64Regs:$x, Float64Regs:$y),
+def : Pat<(frem f64:$x, f64:$y),
           (SELP_f64rr Float64Regs:$x,
             (FSUBf64rr Float64Regs:$x, (FMULf64rr (CVT_f64_f64
               (FDIV64rr Float64Regs:$x, Float64Regs:$y), CvtRZI),
                Float64Regs:$y)),
             (TESTINF_f64r Float64Regs:$y))>,
           Requires<[noUnsafeFPMath]>;
-def : Pat<(frem Float64Regs:$x, fpimm:$y),
+def : Pat<(frem f64:$x, fpimm:$y),
           (SELP_f64rr Float64Regs:$x,
             (FSUBf64rr Float64Regs:$x, (FMULf64ri (CVT_f64_f64
               (FDIV64ri Float64Regs:$x, fpimm:$y), CvtRZI),
@@ -1536,35 +1536,35 @@ multiclass BITWISE<string OpcStr, SDNode OpNode> {
   def b1rr :
     NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, Int1Regs:$b),
               !strconcat(OpcStr, ".pred  \t$dst, $a, $b;"),
-              [(set Int1Regs:$dst, (OpNode Int1Regs:$a, Int1Regs:$b))]>;
+              [(set i1:$dst, (OpNode i1:$a, i1:$b))]>;
   def b1ri :
     NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$a, i1imm:$b),
               !strconcat(OpcStr, ".pred  \t$dst, $a, $b;"),
-              [(set Int1Regs:$dst, (OpNode Int1Regs:$a, imm:$b))]>;
+              [(set i1:$dst, (OpNode i1:$a, imm:$b))]>;
   def b16rr :
     NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int16Regs:$b),
               !strconcat(OpcStr, ".b16  \t$dst, $a, $b;"),
-              [(set Int16Regs:$dst, (OpNode Int16Regs:$a, Int16Regs:$b))]>;
+              [(set i16:$dst, (OpNode i16:$a, i16:$b))]>;
   def b16ri :
     NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i16imm:$b),
               !strconcat(OpcStr, ".b16  \t$dst, $a, $b;"),
-              [(set Int16Regs:$dst, (OpNode Int16Regs:$a, imm:$b))]>;
+              [(set i16:$dst, (OpNode i16:$a, imm:$b))]>;
   def b32rr :
     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
               !strconcat(OpcStr, ".b32  \t$dst, $a, $b;"),
-              [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]>;
+              [(set i32:$dst, (OpNode i32:$a, i32:$b))]>;
   def b32ri :
     NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
               !strconcat(OpcStr, ".b32  \t$dst, $a, $b;"),
-              [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), imm:$b))]>;
+              [(set i32:$dst, (OpNode i32:$a, imm:$b))]>;
   def b64rr :
     NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int64Regs:$b),
               !strconcat(OpcStr, ".b64  \t$dst, $a, $b;"),
-              [(set Int64Regs:$dst, (OpNode Int64Regs:$a, Int64Regs:$b))]>;
+              [(set i64:$dst, (OpNode i64:$a, i64:$b))]>;
   def b64ri :
     NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i64imm:$b),
               !strconcat(OpcStr, ".b64  \t$dst, $a, $b;"),
-              [(set Int64Regs:$dst, (OpNode Int64Regs:$a, imm:$b))]>;
+              [(set i64:$dst, (OpNode i64:$a, imm:$b))]>;
 }
 
 defm OR  : BITWISE<"or", or>;
@@ -1572,46 +1572,46 @@ defm AND : BITWISE<"and", and>;
 defm XOR : BITWISE<"xor", xor>;
 
 // PTX does not support mul on predicates, convert to and instructions
-def : Pat<(mul Int1Regs:$a, Int1Regs:$b), (ANDb1rr Int1Regs:$a, Int1Regs:$b)>;
-def : Pat<(mul Int1Regs:$a, (i1 imm:$b)), (ANDb1ri Int1Regs:$a, imm:$b)>;
+def : Pat<(mul i1:$a, i1:$b), (ANDb1rr Int1Regs:$a, Int1Regs:$b)>;
+def : Pat<(mul i1:$a, imm:$b), (ANDb1ri Int1Regs:$a, imm:$b)>;
 
 // These transformations were once reliably performed by instcombine, but thanks
 // to poison semantics they are no longer safe for LLVM IR, perform them here
 // instead.
-def : Pat<(select Int1Regs:$a, Int1Regs:$b, 0), (ANDb1rr Int1Regs:$a, Int1Regs:$b)>;
-def : Pat<(select Int1Regs:$a, 1, Int1Regs:$b), (ORb1rr Int1Regs:$a, Int1Regs:$b)>;
+def : Pat<(select i1:$a, i1:$b, 0), (ANDb1rr Int1Regs:$a, Int1Regs:$b)>;
+def : Pat<(select i1:$a, 1, i1:$b), (ORb1rr Int1Regs:$a, Int1Regs:$b)>;
 
 // Lower logical v2i16/v4i8 ops as bitwise ops on b32.
 foreach vt = [v2i16, v4i8] in {
-  def: Pat<(or (vt Int32Regs:$a), (vt Int32Regs:$b)),
+  def: Pat<(or vt:$a, vt:$b),
            (ORb32rr Int32Regs:$a, Int32Regs:$b)>;
-  def: Pat<(xor (vt Int32Regs:$a), (vt Int32Regs:$b)),
+  def: Pat<(xor vt:$a, vt:$b),
            (XORb32rr Int32Regs:$a, Int32Regs:$b)>;
-  def: Pat<(and (vt Int32Regs:$a), (vt Int32Regs:$b)),
+  def: Pat<(and vt:$a, vt:$b),
            (ANDb32rr Int32Regs:$a, Int32Regs:$b)>;
 
   // The constants get legalized into a bitcast from i32, so that's what we need
   // to match here.
-  def: Pat<(or Int32Regs:$a, (vt (bitconvert (i32 imm:$b)))),
+  def: Pat<(or vt:$a, (vt (bitconvert (i32 imm:$b)))),
            (ORb32ri Int32Regs:$a, imm:$b)>;
-  def: Pat<(xor Int32Regs:$a, (vt (bitconvert (i32 imm:$b)))),
+  def: Pat<(xor vt:$a, (vt (bitconvert (i32 imm:$b)))),
            (XORb32ri Int32Regs:$a, imm:$b)>;
-  def: Pat<(and Int32Regs:$a, (vt (bitconvert (i32 imm:$b)))),
+  def: Pat<(and vt:$a, (vt (bitconvert (i32 imm:$b)))),
            (ANDb32ri Int32Regs:$a, imm:$b)>;
 }
 
 def NOT1  : NVPTXInst<(outs Int1Regs:$dst), (ins Int1Regs:$src),
                       "not.pred \t$dst, $src;",
-                      [(set Int1Regs:$dst, (not Int1Regs:$src))]>;
+                      [(set i1:$dst, (not i1:$src))]>;
 def NOT16 : NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$src),
                       "not.b16 \t$dst, $src;",
-                      [(set Int16Regs:$dst, (not Int16Regs:$src))]>;
+                      [(set i16:$dst, (not i16:$src))]>;
 def NOT32 : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src),
                       "not.b32 \t$dst, $src;",
-                      [(set (i32 Int32Regs:$dst), (not (i32 Int32Regs:$src)))]>;
+                      [(set i32:$dst, (not i32:$src))]>;
 def NOT64 : NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$src),
                        "not.b64 \t$dst, $src;",
-                       [(set Int64Regs:$dst, (not Int64Regs:$src))]>;
+                       [(set i64:$dst, (not i64:$src))]>;
 
 // Template for left/right shifts.  Takes three operands,
 //   [dest (reg), src (reg), shift (reg or imm)].
@@ -1622,31 +1622,31 @@ multiclass SHIFT<string OpcStr, SDNode OpNode> {
    def i64rr :
      NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
-               [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 Int32Regs:$b)))]>;
+               [(set i64:$dst, (OpNode i64:$a, i32:$b))]>;
    def i64ri :
      NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a, i32imm:$b),
                !strconcat(OpcStr, "64 \t$dst, $a, $b;"),
-               [(set Int64Regs:$dst, (OpNode Int64Regs:$a, (i32 imm:$b)))]>;
+               [(set i64:$dst, (OpNode i64:$a, (i32 imm:$b)))]>;
    def i32rr :
      NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]>;
+               [(set i32:$dst, (OpNode i32:$a, i32:$b))]>;
    def i32ri :
      NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, i32imm:$b),
                !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (i32 Int32Regs:$a), (i32 imm:$b)))]>;
+               [(set i32:$dst, (OpNode i32:$a, (i32 imm:$b)))]>;
    def i32ii :
      NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$a, i32imm:$b),
                !strconcat(OpcStr, "32 \t$dst, $a, $b;"),
-               [(set Int32Regs:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
+               [(set i32:$dst, (OpNode (i32 imm:$a), (i32 imm:$b)))]>;
    def i16rr :
      NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, Int32Regs:$b),
                !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 Int32Regs:$b)))]>;
+               [(set i16:$dst, (OpNode i16:$a, i32:$b))]>;
    def i16ri :
      NVPTXInst<(outs Int16Regs:$dst), (ins Int16Regs:$a, i32imm:$b),
                !strconcat(OpcStr, "16 \t$dst, $a, $b;"),
-               [(set Int16Regs:$dst, (OpNode Int16Regs:$a, (i32 imm:$b)))]>;
+               [(set i16:$dst, (OpNode i16:$a, (i32 imm:$b)))]>;
 }
 
 defm SHL : SHIFT<"shl.b", shl>;
@@ -1657,11 +1657,11 @@ defm SRL : SHIFT<"shr.u", srl>;
 def BREV32 :
   NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a),
              "brev.b32 \t$dst, $a;",
-             [(set Int32Regs:$dst, (bitreverse (i32 Int32Regs:$a)))]>;
+             [(set i32:$dst, (bitreverse i32:$a))]>;
 def BREV64 :
   NVPTXInst<(outs Int64Regs:$dst), (ins Int64Regs:$a),
              "brev.b64 \t$dst, $a;",
-             [(set Int64Regs:$dst, (bitreverse Int64Regs:$a))]>;
+             [(set i64:$dst, (bitreverse i64:$a))]>;
 
 
 //
@@ -1694,17 +1694,17 @@ multiclass BFE<string Instr, ValueType T, RegisterClass RC> {
     : NVPTXInst<(outs RC:$d),
                 (ins RC:$a, Int32Regs:$b, Int32Regs:$c),
                 !strconcat(Instr, " \t$d, $a, $b, $c;"),
-                [(set (T RC:$d), (bfe (T RC:$a), (i32 Int32Regs:$b), (i32 Int32Regs:$c)))]>;
+                [(set T:$d, (bfe T:$a, i32:$b, i32:$c))]>;
   def rri
     : NVPTXInst<(outs RC:$d),
                 (ins RC:$a, Int32Regs:$b, i32imm:$c),
                 !strconcat(Instr, " \t$d, $a, $b, $c;"),
-                [(set (T RC:$d), (bfe (T RC:$a), (i32 Int32Regs:$b), (i32 imm:$c)))]>;
+                [(set T:$d, (bfe T:$a, i32:$b, imm:$c))]>;
   def rii
     : NVPTXInst<(outs RC:$d),
                 (ins RC:$a, i32imm:$b, i32imm:$c),
                 !strconcat(Instr, " \t$d, $a, $b, $c;"),
-                [(set (T RC:$d), (bfe (T RC:$a), (i32 imm:$b), (i32 imm:$c)))]>;
+                [(set T:$d, (bfe T:$a, imm:$b, imm:$c))]>;
 }
 
 multiclass BFI<string Instr, ValueType T, RegisterClass RC, Operand ImmCls> {
@@ -1712,32 +1712,32 @@ multiclass BFI<string Instr, ValueType T, RegisterClass RC, Operand ImmCls> {
     : NVPTXInst<(outs RC:$f),
                 (ins RC:$a, RC:$b, Int32Regs:$c, Int32Regs:$d),
                 !strconcat(Instr, " \t$f, $a, $b, $c, $d;"),
-                [(set (T RC:$f), (bfi (T RC:$a), (T RC:$b), (i32 Int32Regs:$c), (i32 Int32Regs:$d)))]>;
+                [(set T:$f, (bfi T:$a, T:$b, i32:$c, i32:$d))]>;
   def rrri
     : NVPTXInst<(outs RC:$f),
                 (ins RC:$a, RC:$b, Int32Regs:$c, i32imm:$d),
                 !strconcat(Instr, " \t$f, $a, $b, $c, $d;"),
-                [(set (T RC:$f), (bfi (T RC:$a), (T RC:$b), (i32 Int32Regs:$c), (i32 imm:$d)))]>;
+                [(set T:$f, (bfi T:$a, T:$b, i32:$c, imm:$d))]>;
   def rrii
     : NVPTXInst<(outs RC:$f),
                 (ins RC:$a, RC:$b, i32imm:$c, i32imm:$d),
                 !strconcat(Instr, " \t$f, $a, $b, $c, $d;"),
-                [(set (T RC:$f), (bfi (T RC:$a), (T RC:$b), (i32 imm:$c), (i32 imm:$d)))]>;
+                [(set T:$f, (bfi T:$a, T:$b, imm:$c, imm:$d))]>;
   def irrr
     : NVPTXInst<(outs RC:$f),
                 (ins ImmCls:$a, RC:$b, Int32Regs:$c, Int32Regs:$d),
                 !strconcat(Instr, " \t$f, $a, $b, $c, $d;"),
-                [(set (T RC:$f), (bfi (T imm:$a), (T RC:$b), (i32 Int32Regs:$c), (i32 Int32Regs:$d)))]>;
+                [(set T:$f, (bfi (T imm:$a), T:$b, i32:$c, i32:$d))]>;
   def irri
     : NVPTXInst<(outs RC:$f),
                 (ins ImmCls:$a, RC:$b, Int32Regs:$c, i32imm:$d),
                 !strconcat(Instr, " \t$f, $a, $b, $c, $d;"),
-                [(set (T RC:$f), (bfi (T imm:$a), (T RC:$b), (i32 Int32Regs:$c), (i32 imm:$d)))]>;
+                [(set T:$f, (bfi (T imm:$a), T:$b, i32:$c, imm:$d))]>;
   def irii
     : NVPTXInst<(outs RC:$f),
                 (ins ImmCls:$a, RC:$b, i32imm:$c, i32imm:$d),
                 !strconcat(Instr, " \t$f, $a, $b, $c, $d;"),
-                [(set (T RC:$f), (bfi (T imm:$a), (T RC:$b), (i32 imm:$c), (i32 imm:$d)))]>;
+                [(set T:$f, (bfi (T imm:$a), T:$b, imm:$c, imm:$d))]>;
 }
 
 def Hexu32imm : Operand<i32> {
@@ -1749,17 +1749,17 @@ multiclass PRMT<ValueType T, RegisterClass RC> {
     : NVPTXInst<(outs RC:$d),
                 (ins RC:$a, Int32Regs:$b, Int32Regs:$c, PrmtMode:$mode),
                 !strconcat("prmt.b32${mode}", " \t$d, $a, $b, $c;"),
-                [(set (T RC:$d), (prmt (T RC:$a), (T RC:$b), (i32 Int32Regs:$c), imm:$mode))]>;
+                [(set T:$d, (prmt T:$a, T:$b, i32:$c, imm:$mode))]>;
   def rri
     : NVPTXInst<(outs RC:$d),
                 (ins RC:$a, Int32Regs:$b, Hexu32imm:$c, PrmtMode:$mode),
                 !strconcat("prmt.b32${mode}", " \t$d, $a, $b, $c;"),
-                [(set (T RC:$d), (prmt (T RC:$a), (T RC:$b), (i32 imm:$c), imm:$mode))]>;
+                [(set T:$d, (prmt T:$a, T:$b, imm:$c, imm:$mode))]>;
   def rii
     : NVPTXInst<(outs RC:$d),
                 (ins RC:$a, i32imm:$b, Hexu32imm:$c, PrmtMode:$mode),
                 !strconcat("prmt.b32${mode}", " \t$d, $a, $b, $c;"),
-                [(set (T RC:$d), (prmt (T RC:$a), (T imm:$b), (i32 imm:$c), imm:$mode))]>;
+                [(set T:$d, (prmt T:$a, imm:$b, imm:$c, imm:$mode))]>;
 }
 
 let hasSideEffects = false in {
@@ -1780,34 +1780,34 @@ let hasSideEffects = false in {
 
 
 // byte extraction + signed/unsigned extension to i32.
-def : Pat<(i32 (sext_inreg (bfe (i32 Int32Regs:$s),  (i32 Int32Regs:$o), 8), i8)),
+def : Pat<(i32 (sext_inreg (bfe i32:$s, i32:$o, 8), i8)),
           (BFE_S32rri Int32Regs:$s, Int32Regs:$o, 8)>;
-def : Pat<(i32 (sext_inreg (bfe (i32 Int32Regs:$s),  (i32 imm:$o), 8), i8)),
+def : Pat<(i32 (sext_inreg (bfe i32:$s, imm:$o, 8), i8)),
           (BFE_S32rii Int32Regs:$s, imm:$o, 8)>;
-def : Pat<(i32 (and (bfe (i32 Int32Regs:$s),  (i32 Int32Regs:$o), 8), 255)),
+def : Pat<(i32 (and (bfe i32:$s, i32:$o, 8), 255)),
           (BFE_U32rri Int32Regs:$s, Int32Regs:$o, 8)>;
-def : Pat<(i32 (and (bfe (i32 Int32Regs:$s),  (i32 imm:$o), 8), 255)),
+def : Pat<(i32 (and (bfe i32:$s, imm:$o, 8), 255)),
           (BFE_U32rii Int32Regs:$s, imm:$o, 8)>;
 
 // byte extraction + signed extension to i16
-def : Pat<(i16 (sext_inreg (trunc (bfe (i32 Int32Regs:$s),  (i32 imm:$o), 8)), i8)),
-          (CVT_s8_s32 (BFE_S32rii Int32Regs:$s, imm:$o, 8), CvtNONE)>;
+def : Pat<(i16 (sext_inreg (trunc (bfe i32:$s, imm:$o, 8)), i8)),
+          (CVT_s8_s32 (BFE_S32rii i32:$s, imm:$o, 8), CvtNONE)>;
 
 
 // Byte extraction via shift/trunc/sext
-def : Pat<(i16 (sext_inreg (trunc Int32Regs:$s), i8)),
+def : Pat<(i16 (sext_inreg (trunc i32:$s), i8)),
           (CVT_s8_s32 Int32Regs:$s, CvtNONE)>;
-def : Pat<(i16 (sext_inreg (trunc (srl (i32 Int32Regs:$s),  (i32 imm:$o))), i8)),
+def : Pat<(i16 (sext_inreg (trunc (srl i32:$s,  (i32 imm:$o))), i8)),
           (CVT_s8_s32 (BFE_S32rii Int32Regs:$s, imm:$o, 8), CvtNONE)>;
-def : Pat<(sext_inreg (srl (i32 Int32Regs:$s),  (i32 imm:$o)), i8),
+def : Pat<(sext_inreg (srl i32:$s,  (i32 imm:$o)), i8),
           (BFE_S32rii Int32Regs:$s, imm:$o, 8)>;
-def : Pat<(i16 (sra (i16 (trunc Int32Regs:$s)), (i32 8))),
+def : Pat<(i16 (sra (i16 (trunc i32:$s)), (i32 8))),
           (CVT_s8_s32 (BFE_S32rii Int32Regs:$s, 8, 8), CvtNONE)>;
-def : Pat<(sext_inreg (srl (i64 Int64Regs:$s),  (i32 imm:$o)), i8),
+def : Pat<(sext_inreg (srl i64:$s,  (i32 imm:$o)), i8),
           (BFE_S64rii Int64Regs:$s, imm:$o, 8)>;
-def : Pat<(i16 (sext_inreg (trunc Int64Regs:$s), i8)),
+def : Pat<(i16 (sext_inreg (trunc i64:$s), i8)),
           (CVT_s8_s64 Int64Regs:$s, CvtNONE)>;
-def : Pat<(i16 (sext_inreg (trunc (srl (i64 Int64Regs:$s),  (i32 imm:$o))), i8)),
+def : Pat<(i16 (sext_inreg (trunc (srl i64:$s,  (i32 imm:$o))), i8)),
           (CVT_s8_s64 (BFE_S64rii Int64Regs:$s, imm:$o, 8), CvtNONE)>;
 
 //-----------------------------------
@@ -1948,10 +1948,10 @@ def Wrapper    : SDNode<"NVPTXISD::Wrapper", SDTWrapper>;
 // Load a memory address into a u32 or u64 register.
 def MOV_ADDR : NVPTXInst<(outs Int32Regs:$dst), (ins imem:$a),
                          "mov.u32 \t$dst, $a;",
-                         [(set Int32Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+                         [(set i32:$dst, (Wrapper tglobaladdr:$a))]>;
 def MOV_ADDR64 : NVPTXInst<(outs Int64Regs:$dst), (ins imem:$a),
                            "mov.u64 \t$dst, $a;",
-                           [(set Int64Regs:$dst, (Wrapper tglobaladdr:$a))]>;
+                           [(set i64:$dst, (Wrapper tglobaladdr:$a))]>;
 
 // Get pointer to local stack.
 let hasSideEffects = false in {
@@ -1993,16 +1993,16 @@ let IsSimpleMove=1, hasSideEffects=0 in {
 
 def IMOV1ri : NVPTXInst<(outs Int1Regs:$dst), (ins i1imm:$src),
                         "mov.pred \t$dst, $src;",
-                        [(set Int1Regs:$dst, imm:$src)]>;
+                        [(set i1:$dst, imm:$src)]>;
 def IMOV16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
                          "mov.u16 \t$dst, $src;",
-                         [(set Int16Regs:$dst, imm:$src)]>;
+                         [(set i16:$dst, imm:$src)]>;
 def IMOV32ri : NVPTXInst<(outs Int32Regs:$dst), (ins i32imm:$src),
                          "mov.u32 \t$dst, $src;",
-                         [(set (i32 Int32Regs:$dst), imm:$src)]>;
+                         [(set i32:$dst, imm:$src)]>;
 def IMOV64ri : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
                         "mov.u64 \t$dst, $src;",
-                        [(set Int64Regs:$dst, imm:$src)]>;
+                        [(set i64:$dst, imm:$src)]>;
 
 def IMOVB16ri : NVPTXInst<(outs Int16Regs:$dst), (ins i16imm:$src),
                          "mov.b16 \t$dst, $src;", []>;
@@ -2013,10 +2013,10 @@ def IMOVB64ri : NVPTXInst<(outs Int64Regs:$dst), (ins i64imm:$src),
 
 def FMOV32ri : NVPTXInst<(outs Float32Regs:$dst), (ins f32imm:$src),
                          "mov.f32 \t$dst, $src;",
-                         [(set Float32Regs:$dst, fpimm:$src)]>;
+                         [(set f32:$dst, fpimm:$src)]>;
 def FMOV64ri : NVPTXInst<(outs Float64Regs:$dst), (ins f64imm:$src),
                          "mov.f64 \t$dst, $src;",
-                         [(set Float64Regs:$dst, fpimm:$src)]>;
+                         [(set f64:$dst, fpimm:$src)]>;
 
 def : Pat<(i32 (Wrapper texternalsym:$dst)), (IMOV32ri texternalsym:$dst)>;
 def : Pat<(i64 (Wrapper texternalsym:$dst)), (IMOV64ri texternalsym:$dst)>;
@@ -2024,10 +2024,10 @@ def : Pat<(i64 (Wrapper texternalsym:$dst)), (IMOV64ri texternalsym:$dst)>;
 //---- Copy Frame Index ----
 def LEA_ADDRi :   NVPTXInst<(outs Int32Regs:$dst), (ins MEMri:$addr),
                             "add.u32 \t$dst, ${addr:add};",
-                            [(set Int32Regs:$dst, ADDRri:$addr)]>;
+                            [(set i32:$dst, ADDRri:$addr)]>;
 def LEA_ADDRi64 : NVPTXInst<(outs Int64Regs:$dst), (ins MEMri64:$addr),
                             "add.u64 \t$dst, ${addr:add};",
-                            [(set Int64Regs:$dst, ADDRri64:$addr)]>;
+                            [(set i64:$dst, ADDRri64:$addr)]>;
 
 //-----------------------------------
 // Comparison and Selection
@@ -2055,45 +2055,45 @@ multiclass ISET_FORMAT<PatFrag OpNode, PatLeaf Mode,
   // i16 -> pred
   def : Pat<(i1 (OpNode i16:$a, i16:$b)),
             (setp_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
-  def : Pat<(i1 (OpNode Int16Regs:$a, imm:$b)),
+  def : Pat<(i1 (OpNode i16:$a, imm:$b)),
             (setp_16ri Int16Regs:$a, imm:$b, Mode)>;
-  def : Pat<(i1 (OpNode imm:$a, Int16Regs:$b)),
+  def : Pat<(i1 (OpNode imm:$a, i16:$b)),
             (setp_16ir imm:$a, Int16Regs:$b, Mode)>;
   // i32 -> pred
   def : Pat<(i1 (OpNode i32:$a, i32:$b)),
             (setp_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
-  def : Pat<(i1 (OpNode (i32 Int32Regs:$a), imm:$b)),
+  def : Pat<(i1 (OpNode i32:$a, imm:$b)),
             (setp_32ri Int32Regs:$a, imm:$b, Mode)>;
-  def : Pat<(i1 (OpNode imm:$a, (i32 Int32Regs:$b))),
+  def : Pat<(i1 (OpNode imm:$a, i32:$b)),
             (setp_32ir imm:$a, Int32Regs:$b, Mode)>;
   // i64 -> pred
-  def : Pat<(i1 (OpNode Int64Regs:$a, Int64Regs:$b)),
+  def : Pat<(i1 (OpNode i64:$a, i64:$b)),
             (setp_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
-  def : Pat<(i1 (OpNode Int64Regs:$a, imm:$b)),
+  def : Pat<(i1 (OpNode i64:$a, imm:$b)),
             (setp_64ri Int64Regs:$a, imm:$b, Mode)>;
-  def : Pat<(i1 (OpNode imm:$a, Int64Regs:$b)),
+  def : Pat<(i1 (OpNode imm:$a, i64:$b)),
             (setp_64ir imm:$a, Int64Regs:$b, Mode)>;
 
   // i16 -> i32
   def : Pat<(i32 (OpNode i16:$a, i16:$b)),
             (set_16rr Int16Regs:$a, Int16Regs:$b, Mode)>;
-  def : Pat<(i32 (OpNode Int16Regs:$a, imm:$b)),
+  def : Pat<(i32 (OpNode i16:$a, imm:$b)),
             (set_16ri Int16Regs:$a, imm:$b, Mode)>;
-  def : Pat<(i32 (OpNode imm:$a, Int16Regs:$b)),
+  def : Pat<(i32 (OpNode imm:$a, i16:$b)),
             (set_16ir imm:$a, Int16Regs:$b, Mode)>;
   // i32 -> i32
   def : Pat<(i32 (OpNode i32:$a, i32:$b)),
             (set_32rr Int32Regs:$a, Int32Regs:$b, Mode)>;
-  def : Pat<(i32 (OpNode (i32 Int32Regs:$a), imm:$b)),
+  def : Pat<(i32 (OpNode i32:$a, imm:$b)),
             (set_32ri Int32Regs:$a, imm:$b, Mode)>;
-  def : Pat<(i32 (OpNode imm:$a, (i32 Int32Regs:$b))),
+  def : Pat<(i32 (OpNode imm:$a, i32:$b)),
             (set_32ir imm:$a, Int32Regs:$b, Mode)>;
   // i64 -> i32
-  def : Pat<(i32 (OpNode Int64Regs:$a, Int64Regs:$b)),
+  def : Pat<(i32 (OpNode i64:$a, Int64Regs:$b)),
             (set_64rr Int64Regs:$a, Int64Regs:$b, Mode)>;
-  def : Pat<(i32 (OpNode Int64Regs:$a, imm:$b)),
+  def : Pat<(i32 (OpNode i64:$a, imm:$b)),
             (set_64ri Int64Regs:$a, imm:$b, Mode)>;
-  def : Pat<(i32 (OpNode imm:$a, Int64Regs:$b)),
+  def : Pat<(i32 (OpNode imm:$a, i64:$b)),
             (set_64ir imm:$a, Int64Regs:$b, Mode)>;
 }
 
@@ -2200,142 +2200,142 @@ def: Pat<(setne (i16 (and (trunc (bfe Int32Regs:$a, imm:$oa, 8)), 255)),
          (SETP_u32rr (BFE_U32rii $a, imm:$oa, 8), (BFE_U32rii $b, imm:$ob, 8), CmpNE)>;
 
 // i1 compare -> i32
-def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
+def : Pat<(i32 (setne i1:$a, i1:$b)),
           (SELP_u32ii -1, 0, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
-def : Pat<(i32 (setne Int1Regs:$a, Int1Regs:$b)),
+def : Pat<(i32 (setne i1:$a, i1:$b)),
           (SELP_u32ii 0, -1, (XORb1rr Int1Regs:$a, Int1Regs:$b))>;
 
 
 
 multiclass FSET_FORMAT<PatFrag OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
   // f16 -> pred
-  def : Pat<(i1 (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b))),
+  def : Pat<(i1 (OpNode f16:$a, f16:$b)),
             (SETP_f16rr Int16Regs:$a, Int16Regs:$b, ModeFTZ)>,
         Requires<[useFP16Math,doF32FTZ]>;
-  def : Pat<(i1 (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b))),
+  def : Pat<(i1 (OpNode f16:$a, f16:$b)),
             (SETP_f16rr Int16Regs:$a, Int16Regs:$b, Mode)>,
         Requires<[useFP16Math]>;
-  def : Pat<(i1 (OpNode (f16 Int16Regs:$a), fpimm:$b)),
+  def : Pat<(i1 (OpNode f16:$a, fpimm:$b)),
             (SETP_f16rr Int16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
         Requires<[useFP16Math,doF32FTZ]>;
-  def : Pat<(i1 (OpNode (f16 Int16Regs:$a), fpimm:$b)),
+  def : Pat<(i1 (OpNode f16:$a, fpimm:$b)),
             (SETP_f16rr Int16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
         Requires<[useFP16Math]>;
-  def : Pat<(i1 (OpNode fpimm:$a, (f16 Int16Regs:$b))),
+  def : Pat<(i1 (OpNode fpimm:$a, f16:$b)),
             (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Int16Regs:$b, ModeFTZ)>,
         Requires<[useFP16Math,doF32FTZ]>;
-  def : Pat<(i1 (OpNode fpimm:$a, (f16 Int16Regs:$b))),
+  def : Pat<(i1 (OpNode fpimm:$a, f16:$b)),
             (SETP_f16rr (LOAD_CONST_F16 fpimm:$a), Int16Regs:$b, Mode)>,
         Requires<[useFP16Math]>;
 
   // bf16 -> pred
-  def : Pat<(i1 (OpNode (bf16 Int16Regs:$a), (bf16 Int16Regs:$b))),
+  def : Pat<(i1 (OpNode bf16:$a, bf16:$b)),
             (SETP_bf16rr Int16Regs:$a, Int16Regs:$b, ModeFTZ)>,
         Requires<[hasBF16Math,doF32FTZ]>;
-  def : Pat<(i1 (OpNode (bf16 Int16Regs:$a), (bf16 Int16Regs:$b))),
+  def : Pat<(i1 (OpNode bf16:$a, bf16:$b)),
             (SETP_bf16rr Int16Regs:$a, Int16Regs:$b, Mode)>,
         Requires<[hasBF16Math]>;
-  def : Pat<(i1 (OpNode (bf16 Int16Regs:$a), fpimm:$b)),
+  def : Pat<(i1 (OpNode bf16:$a, fpimm:$b)),
             (SETP_bf16rr Int16Regs:$a, (LOAD_CONST_BF16 fpimm:$b), ModeFTZ)>,
         Requires<[hasBF16Math,doF32FTZ]>;
-  def : Pat<(i1 (OpNode (bf16 Int16Regs:$a), fpimm:$b)),
+  def : Pat<(i1 (OpNode bf16:$a, fpimm:$b)),
             (SETP_bf16rr Int16Regs:$a, (LOAD_CONST_BF16 fpimm:$b), Mode)>,
         Requires<[hasBF16Math]>;
-  def : Pat<(i1 (OpNode fpimm:$a, (bf16 Int16Regs:$b))),
+  def : Pat<(i1 (OpNode fpimm:$a, bf16:$b)),
             (SETP_bf16rr (LOAD_CONST_BF16 fpimm:$a), Int16Regs:$b, ModeFTZ)>,
         Requires<[hasBF16Math,doF32FTZ]>;
-  def : Pat<(i1 (OpNode fpimm:$a, (bf16 Int16Regs:$b))),
+  def : Pat<(i1 (OpNode fpimm:$a, bf16:$b)),
             (SETP_bf16rr (LOAD_CONST_BF16 fpimm:$a), Int16Regs:$b, Mode)>,
         Requires<[hasBF16Math]>;
 
   // f32 -> pred
-  def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+  def : Pat<(i1 (OpNode f32:$a, f32:$b)),
             (SETP_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
         Requires<[doF32FTZ]>;
-  def : Pat<(i1 (OpNode Float32Regs:$a, Float32Regs:$b)),
+  def : Pat<(i1 (OpNode f32:$a, f32:$b)),
             (SETP_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
   def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
             (SETP_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
         Requires<[doF32FTZ]>;
-  def : Pat<(i1 (OpNode Float32Regs:$a, fpimm:$b)),
+  def : Pat<(i1 (OpNode f32:$a, fpimm:$b)),
             (SETP_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
-  def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+  def : Pat<(i1 (OpNode fpimm:$a, f32:$b)),
             (SETP_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
         Requires<[doF32FTZ]>;
-  def : Pat<(i1 (OpNode fpimm:$a, Float32Regs:$b)),
+  def : Pat<(i1 (OpNode fpimm:$a, f32:$b)),
             (SETP_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
 
   // f64 -> pred
-  def : Pat<(i1 (OpNode Float64Regs:$a, Float64Regs:$b)),
+  def : Pat<(i1 (OpNode f64:$a, f64:$b)),
             (SETP_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
-  def : Pat<(i1 (OpNode Float64Regs:$a, fpimm:$b)),
+  def : Pat<(i1 (OpNode f64:$a, fpimm:$b)),
             (SETP_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
-  def : Pat<(i1 (OpNode fpimm:$a, Float64Regs:$b)),
+  def : Pat<(i1 (OpNode fpimm:$a, f64:$b)),
             (SETP_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
 
   // f16 -> i32
-  def : Pat<(i32 (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b))),
+  def : Pat<(i32 (OpNode f16:$a, f16:$b)),
             (SET_f16rr Int16Regs:$a, Int16Regs:$b, ModeFTZ)>,
         Requires<[useFP16Math, doF32FTZ]>;
-  def : Pat<(i32 (OpNode (f16 Int16Regs:$a), (f16 Int16Regs:$b))),
+  def : Pat<(i32 (OpNode f16:$a, f16:$b)),
             (SET_f16rr Int16Regs:$a, Int16Regs:$b, Mode)>,
         Requires<[useFP16Math]>;
-  def : Pat<(i32 (OpNode (f16 Int16Regs:$a), fpimm:$b)),
+  def : Pat<(i32 (OpNode f16:$a, fpimm:$b)),
             (SET_f16rr Int16Regs:$a, (LOAD_CONST_F16 fpimm:$b), ModeFTZ)>,
         Requires<[useFP16Math, doF32FTZ]>;
-  def : Pat<(i32 (OpNode (f16 Int16Regs:$a), fpimm:$b)),
+  def : Pat<(i32 (OpNode f16:$a, fpimm:$b)),
             (SET_f16rr Int16Regs:$a, (LOAD_CONST_F16 fpimm:$b), Mode)>,
         Requires<[useFP16Math]>;
-  def : Pat<(i32 (OpNode fpimm:$a, (f16 Int16Regs:$b))),
+  def : Pat<(i32 (OpNode fpimm:$a, f16:$b)),
             (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Int16Regs:$b, ModeFTZ)>,
         Requires<[useFP16Math, doF32FTZ]>;
-  def : Pat<(i32 (OpNode fpimm:$a, (f16 Int16Regs:$b))),
+  def : Pat<(i32 (OpNode fpimm:$a, f16:$b)),
             (SET_f16ir (LOAD_CONST_F16 fpimm:$a), Int16Regs:$b, Mode)>,
         Requires<[useFP16Math]>;
 
   // bf16 -> i32
-  def : Pat<(i32 (OpNode (bf16 Int16Regs:$a), (bf16 Int16Regs:$b))),
+  def : Pat<(i32 (OpNode bf16:$a, bf16:$b)),
             (SET_bf16rr Int16Regs:$a, Int16Regs:$b, ModeFTZ)>,
         Requires<[hasBF16Math, doF32FTZ]>;
-  def : Pat<(i32 (OpNode (bf16 Int16Regs:$a), (bf16 Int16Regs:$b))),
+  def : Pat<(i32 (OpNode bf16:$a, bf16:$b)),
             (SET_bf16rr Int16Regs:$a, Int16Regs:$b, Mode)>,
         Requires<[hasBF16Math]>;
-  def : Pat<(i32 (OpNode (bf16 Int16Regs:$a), fpimm:$b)),
+  def : Pat<(i32 (OpNode bf16:$a, fpimm:$b)),
             (SET_bf16rr Int16Regs:$a, (LOAD_CONST_BF16 fpimm:$b), ModeFTZ)>,
         Requires<[hasBF16Math, doF32FTZ]>;
-  def : Pat<(i32 (OpNode (bf16 Int16Regs:$a), fpimm:$b)),
+  def : Pat<(i32 (OpNode bf16:$a, fpimm:$b)),
             (SET_bf16rr Int16Regs:$a, (LOAD_CONST_BF16 fpimm:$b), Mode)>,
         Requires<[hasBF16Math]>;
-  def : Pat<(i32 (OpNode fpimm:$a, (bf16 Int16Regs:$b))),
+  def : Pat<(i32 (OpNode fpimm:$a, bf16:$b)),
             (SET_bf16ir (LOAD_CONST_BF16 fpimm:$a), Int16Regs:$b, ModeFTZ)>,
         Requires<[hasBF16Math, doF32FTZ]>;
-  def : Pat<(i32 (OpNode fpimm:$a, (bf16 Int16Regs:$b))),
+  def : Pat<(i32 (OpNode fpimm:$a, bf16:$b)),
             (SET_bf16ir (LOAD_CONST_BF16 fpimm:$a), Int16Regs:$b, Mode)>,
         Requires<[hasBF16Math]>;
 
   // f32 -> i32
-  def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+  def : Pat<(i32 (OpNode f32:$a, f32:$b)),
             (SET_f32rr Float32Regs:$a, Float32Regs:$b, ModeFTZ)>,
         Requires<[doF32FTZ]>;
-  def : Pat<(i32 (OpNode Float32Regs:$a, Float32Regs:$b)),
+  def : Pat<(i32 (OpNode f32:$a, f32:$b)),
             (SET_f32rr Float32Regs:$a, Float32Regs:$b, Mode)>;
-  def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+  def : Pat<(i32 (OpNode f32:$a, fpimm:$b)),
             (SET_f32ri Float32Regs:$a, fpimm:$b, ModeFTZ)>,
         Requires<[doF32FTZ]>;
-  def : Pat<(i32 (OpNode Float32Regs:$a, fpimm:$b)),
+  def : Pat<(i32 (OpNode f32:$a, fpimm:$b)),
             (SET_f32ri Float32Regs:$a, fpimm:$b, Mode)>;
-  def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+  def : Pat<(i32 (OpNode fpimm:$a, f32:$b)),
             (SET_f32ir fpimm:$a, Float32Regs:$b, ModeFTZ)>,
         Requires<[doF32FTZ]>;
-  def : Pat<(i32 (OpNode fpimm:$a, Float32Regs:$b)),
+  def : Pat<(i32 (OpNode fpimm:$a, f32:$b)),
             (SET_f32ir fpimm:$a, Float32Regs:$b, Mode)>;
 
   // f64 -> i32
-  def : Pat<(i32 (OpNode Float64Regs:$a, Float64Regs:$b)),
+  def : Pat<(i32 (OpNode f64:$a, f64:$b)),
             (SET_f64rr Float64Regs:$a, Float64Regs:$b, Mode)>;
-  def : Pat<(i32 (OpNode Float64Regs:$a, fpimm:$b)),
+  def : Pat<(i32 (OpNode f64:$a, fpimm:$b)),
             (SET_f64ri Float64Regs:$a, fpimm:$b, Mode)>;
-  def : Pat<(i32 (OpNode fpimm:$a, Float64Regs:$b)),
+  def : Pat<(i32 (OpNode fpimm:$a, f64:$b)),
             (SET_f64ir fpimm:$a, Float64Regs:$b, Mode)>;
 }
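
To make the Mode binding concrete: assuming FSET_FORMAT is instantiated for setogt with CmpGT as Mode (an assumption for illustration; the instantiations are outside these hunks), the f64 -> pred entry above expands to:

  // Ordered greater-than on f64, with the operands now typed as f64 in the
  // source pattern; CmpGT stands for whichever Mode leaf the defm passes in.
  def : Pat<(i1 (setogt f64:$a, f64:$b)),
            (SETP_f64rr Float64Regs:$a, Float64Regs:$b, CmpGT)>;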
 
@@ -2714,7 +2714,7 @@ def CallVoidInst :      NVPTXInst<(outs), (ins imem:$addr), "$addr, ",
 def CallVoidInstReg :   NVPTXInst<(outs), (ins Int32Regs:$addr), "$addr, ",
                                   [(CallVoid i32:$addr)]>;
 def CallVoidInstReg64 : NVPTXInst<(outs), (ins Int64Regs:$addr), "$addr, ",
-                                  [(CallVoid Int64Regs:$addr)]>;
+                                  [(CallVoid i64:$addr)]>;
 def PrototypeInst :     NVPTXInst<(outs), (ins i32imm:$val), ", prototype_$val;",
                                   [(Prototype (i32 imm:$val))]>;
 
@@ -2747,7 +2747,7 @@ def DeclareScalarRegInst :
 class MoveParamInst<ValueType T, NVPTXRegClass regclass, string asmstr> :
   NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
             !strconcat("mov", asmstr, " \t$dst, $src;"),
-            [(set (T regclass:$dst), (MoveParam (T regclass:$src)))]>;
+            [(set T:$dst, (MoveParam T:$src))]>;
 
 class MoveParamSymbolInst<NVPTXRegClass regclass, Operand srcty, ValueType vt,
                           string asmstr> :
@@ -2782,7 +2782,7 @@ def PseudoUseParamF32 : PseudoUseParamInst<Float32Regs, f32>;
 class ProxyRegInst<string SzStr, ValueType T, NVPTXRegClass regclass> :
   NVPTXInst<(outs regclass:$dst), (ins regclass:$src),
             !strconcat("mov.", SzStr, " \t$dst, $src;"),
-            [(set (T regclass:$dst), (ProxyReg (T regclass:$src)))]>;
+            [(set T:$dst, (ProxyReg T:$src))]>;
 
 def ProxyRegI1    : ProxyRegInst<"pred", i1, Int1Regs>;
 def ProxyRegI16   : ProxyRegInst<"b16",  i16, Int16Regs>;
@@ -3090,7 +3090,7 @@ class F_BITCONVERT<string SzStr, ValueType TIn, ValueType TOut,
   NVPTXRegClass regclassOut = ValueToRegClass<TOut>.ret> :
            NVPTXInst<(outs regclassOut:$d), (ins regclassIn:$a),
            !strconcat("mov.b", SzStr, " \t$d, $a;"),
-     [(set (TOut regclassOut:$d), (bitconvert (TIn regclassIn:$a)))]>;
+     [(set TOut:$d, (bitconvert TIn:$a))]>;
 
 def BITCONVERT_32_I2F : F_BITCONVERT<"32", i32, f32>;
 def BITCONVERT_32_F2I : F_BITCONVERT<"32", f32, i32>;
@@ -3100,15 +3100,15 @@ def BITCONVERT_64_F2I : F_BITCONVERT<"64", f64, i64>;
 foreach vt = [v2f16, v2bf16, v2i16, v4i8] in {
 def: Pat<(vt (bitconvert (f32 Float32Regs:$a))),
          (BITCONVERT_32_F2I Float32Regs:$a)>;
-def: Pat<(f32 (bitconvert (vt Int32Regs:$a))),
+def: Pat<(f32 (bitconvert vt:$a)),
          (BITCONVERT_32_I2F Int32Regs:$a)>;
 }
 foreach vt = [f16, bf16] in {
 def: Pat<(vt (bitconvert (i16 UInt16Const:$a))),
          (IMOVB16ri UInt16Const:$a)>;
-def: Pat<(vt (bitconvert (i16 Int16Regs:$a))),
+def: Pat<(vt (bitconvert i16:$a)),
          (ProxyRegI16 Int16Regs:$a)>;
-def: Pat<(i16 (bitconvert (vt Int16Regs:$a))),
+def: Pat<(i16 (bitconvert vt:$a)),
          (ProxyRegI16 Int16Regs:$a)>;
 }
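
Spelled out, the first foreach above emits one pattern pair per packed type; for vt = v2f16 the pair is simply:

  // v2f16 and f32 both live in 32-bit registers, so the bitcasts lower to a
  // plain mov.b32 through the BITCONVERT_32_* instructions defined above.
  def : Pat<(v2f16 (bitconvert (f32 Float32Regs:$a))),
            (BITCONVERT_32_F2I Float32Regs:$a)>;
  def : Pat<(f32 (bitconvert v2f16:$a)),
            (BITCONVERT_32_I2F Int32Regs:$a)>;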
 
@@ -3129,279 +3129,279 @@ foreach ta = [v2f16, v2bf16, v2i16, v4i8, i32] in {
 // and then cvt to floating-point.
 
 // sint -> f16
-def : Pat<(f16 (sint_to_fp Int1Regs:$a)),
+def : Pat<(f16 (sint_to_fp i1:$a)),
           (CVT_f16_s32 (SELP_s32ii -1, 0, Int1Regs:$a), CvtRN)>;
 def : Pat<(f16 (sint_to_fp Int16Regs:$a)),
-          (CVT_f16_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int32Regs:$a)),
-          (CVT_f16_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f16 (sint_to_fp Int64Regs:$a)),
-          (CVT_f16_s64 Int64Regs:$a, CvtRN)>;
+          (CVT_f16_s16 i16:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp i32:$a)),
+          (CVT_f16_s32 i32:$a, CvtRN)>;
+def : Pat<(f16 (sint_to_fp i64:$a)),
+          (CVT_f16_s64 i64:$a, CvtRN)>;
 
 // uint -> f16
-def : Pat<(f16 (uint_to_fp Int1Regs:$a)),
+def : Pat<(f16 (uint_to_fp i1:$a)),
           (CVT_f16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
 def : Pat<(f16 (uint_to_fp Int16Regs:$a)),
-          (CVT_f16_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int32Regs:$a)),
-          (CVT_f16_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f16 (uint_to_fp Int64Regs:$a)),
-          (CVT_f16_u64 Int64Regs:$a, CvtRN)>;
+          (CVT_f16_u16 i16:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp i32:$a)),
+          (CVT_f16_u32 i32:$a, CvtRN)>;
+def : Pat<(f16 (uint_to_fp i64:$a)),
+          (CVT_f16_u64 i64:$a, CvtRN)>;
 
 // sint -> bf16
-def : Pat<(bf16 (sint_to_fp Int1Regs:$a)),
+def : Pat<(bf16 (sint_to_fp i1:$a)),
           (CVT_bf16_s32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
-def : Pat<(bf16 (sint_to_fp Int16Regs:$a)),
-          (CVT_bf16_s16 Int16Regs:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
-def : Pat<(bf16 (sint_to_fp Int32Regs:$a)),
-          (CVT_bf16_s32 Int32Regs:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
-def : Pat<(bf16 (sint_to_fp Int64Regs:$a)),
-          (CVT_bf16_s64 Int64Regs:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
+def : Pat<(bf16 (sint_to_fp i16:$a)),
+          (CVT_bf16_s16 i16:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
+def : Pat<(bf16 (sint_to_fp i32:$a)),
+          (CVT_bf16_s32 i32:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
+def : Pat<(bf16 (sint_to_fp i64:$a)),
+          (CVT_bf16_s64 i64:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
 
 // uint -> bf16
-def : Pat<(bf16 (uint_to_fp Int1Regs:$a)),
+def : Pat<(bf16 (uint_to_fp i1:$a)),
           (CVT_bf16_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
-def : Pat<(bf16 (uint_to_fp Int16Regs:$a)),
-          (CVT_bf16_u16 Int16Regs:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
-def : Pat<(bf16 (uint_to_fp Int32Regs:$a)),
-          (CVT_bf16_u32 Int32Regs:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
-def : Pat<(bf16 (uint_to_fp Int64Regs:$a)),
-          (CVT_bf16_u64 Int64Regs:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
+def : Pat<(bf16 (uint_to_fp i16:$a)),
+          (CVT_bf16_u16 i16:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
+def : Pat<(bf16 (uint_to_fp i32:$a)),
+          (CVT_bf16_u32 i32:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
+def : Pat<(bf16 (uint_to_fp i64:$a)),
+          (CVT_bf16_u64 i64:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
 
 // sint -> f32
-def : Pat<(f32 (sint_to_fp Int1Regs:$a)),
+def : Pat<(f32 (sint_to_fp i1:$a)),
           (CVT_f32_s32 (SELP_s32ii -1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int16Regs:$a)),
-          (CVT_f32_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int32Regs:$a)),
-          (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f32 (sint_to_fp Int64Regs:$a)),
-          (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp i16:$a)),
+          (CVT_f32_s16 i16:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp i32:$a)),
+          (CVT_f32_s32 i32:$a, CvtRN)>;
+def : Pat<(f32 (sint_to_fp i64:$a)),
+          (CVT_f32_s64 i64:$a, CvtRN)>;
 
 // uint -> f32
-def : Pat<(f32 (uint_to_fp Int1Regs:$a)),
+def : Pat<(f32 (uint_to_fp i1:$a)),
           (CVT_f32_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int16Regs:$a)),
+def : Pat<(f32 (uint_to_fp i16:$a)),
           (CVT_f32_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int32Regs:$a)),
-          (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f32 (uint_to_fp Int64Regs:$a)),
-          (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp i32:$a)),
+          (CVT_f32_u32 i32:$a, CvtRN)>;
+def : Pat<(f32 (uint_to_fp i64:$a)),
+          (CVT_f32_u64 i64:$a, CvtRN)>;
 
 // sint -> f64
-def : Pat<(f64 (sint_to_fp Int1Regs:$a)),
+def : Pat<(f64 (sint_to_fp i1:$a)),
           (CVT_f64_s32 (SELP_s32ii -1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int16Regs:$a)),
+def : Pat<(f64 (sint_to_fp i16:$a)),
           (CVT_f64_s16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int32Regs:$a)),
-          (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f64 (sint_to_fp Int64Regs:$a)),
-          (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp i32:$a)),
+          (CVT_f64_s32 i32:$a, CvtRN)>;
+def : Pat<(f64 (sint_to_fp i64:$a)),
+          (CVT_f64_s64 i64:$a, CvtRN)>;
 
 // uint -> f64
-def : Pat<(f64 (uint_to_fp Int1Regs:$a)),
+def : Pat<(f64 (uint_to_fp i1:$a)),
           (CVT_f64_u32 (SELP_u32ii 1, 0, Int1Regs:$a), CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int16Regs:$a)),
+def : Pat<(f64 (uint_to_fp i16:$a)),
           (CVT_f64_u16 Int16Regs:$a, CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int32Regs:$a)),
-          (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(f64 (uint_to_fp Int64Regs:$a)),
-          (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp i32:$a)),
+          (CVT_f64_u32 i32:$a, CvtRN)>;
+def : Pat<(f64 (uint_to_fp i64:$a)),
+          (CVT_f64_u64 i64:$a, CvtRN)>;
 
 
 // f16 -> sint
-def : Pat<(i1 (fp_to_sint (f16 Int16Regs:$a))),
+def : Pat<(i1 (fp_to_sint f16:$a)),
           (SETP_b16ri Int16Regs:$a, 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint (f16 Int16Regs:$a))),
-          (CVT_s16_f16 (f16 Int16Regs:$a), CvtRZI)>;
-def : Pat<(i32 (fp_to_sint (f16 Int16Regs:$a))),
-          (CVT_s32_f16 (f16 Int16Regs:$a), CvtRZI)>;
-def : Pat<(i64 (fp_to_sint (f16 Int16Regs:$a))),
+def : Pat<(i16 (fp_to_sint f16:$a)),
+          (CVT_s16_f16 Int16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint f16:$a)),
+          (CVT_s32_f16 Int16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint f16:$a)),
           (CVT_s64_f16 Int16Regs:$a, CvtRZI)>;
 
 // f16 -> uint
-def : Pat<(i1 (fp_to_uint (f16 Int16Regs:$a))),
+def : Pat<(i1 (fp_to_uint f16:$a)),
           (SETP_b16ri Int16Regs:$a, 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint (f16 Int16Regs:$a))),
+def : Pat<(i16 (fp_to_uint f16:$a)),
           (CVT_u16_f16 Int16Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint (f16 Int16Regs:$a))),
+def : Pat<(i32 (fp_to_uint f16:$a)),
           (CVT_u32_f16 Int16Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint (f16 Int16Regs:$a))),
+def : Pat<(i64 (fp_to_uint f16:$a)),
           (CVT_u64_f16 Int16Regs:$a, CvtRZI)>;
 
 // bf16 -> sint
-def : Pat<(i1 (fp_to_sint (bf16 Int16Regs:$a))),
+def : Pat<(i1 (fp_to_sint bf16:$a)),
           (SETP_b16ri Int16Regs:$a, 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint (bf16 Int16Regs:$a))),
-          (CVT_s16_bf16 (bf16 Int16Regs:$a), CvtRZI)>;
-def : Pat<(i32 (fp_to_sint (bf16 Int16Regs:$a))),
-          (CVT_s32_bf16 (bf16 Int16Regs:$a), CvtRZI)>;
-def : Pat<(i64 (fp_to_sint (bf16 Int16Regs:$a))),
+def : Pat<(i16 (fp_to_sint bf16:$a)),
+          (CVT_s16_bf16 Int16Regs:$a, CvtRZI)>;
+def : Pat<(i32 (fp_to_sint bf16:$a)),
+          (CVT_s32_bf16 Int16Regs:$a, CvtRZI)>;
+def : Pat<(i64 (fp_to_sint bf16:$a)),
           (CVT_s64_bf16 Int16Regs:$a, CvtRZI)>;
 
 // bf16 -> uint
-def : Pat<(i1 (fp_to_uint (bf16 Int16Regs:$a))),
+def : Pat<(i1 (fp_to_uint bf16:$a)),
           (SETP_b16ri Int16Regs:$a, 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint (bf16 Int16Regs:$a))),
+def : Pat<(i16 (fp_to_uint bf16:$a)),
           (CVT_u16_bf16 Int16Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint (bf16 Int16Regs:$a))),
+def : Pat<(i32 (fp_to_uint bf16:$a)),
           (CVT_u32_bf16 Int16Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint (bf16 Int16Regs:$a))),
+def : Pat<(i64 (fp_to_uint bf16:$a)),
           (CVT_u64_bf16 Int16Regs:$a, CvtRZI)>;
 // f32 -> sint
-def : Pat<(i1 (fp_to_sint Float32Regs:$a)),
+def : Pat<(i1 (fp_to_sint f32:$a)),
           (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+def : Pat<(i16 (fp_to_sint f32:$a)),
           (CVT_s16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_sint Float32Regs:$a)),
+def : Pat<(i16 (fp_to_sint f32:$a)),
           (CVT_s16_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+def : Pat<(i32 (fp_to_sint f32:$a)),
           (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_sint Float32Regs:$a)),
+def : Pat<(i32 (fp_to_sint f32:$a)),
           (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+def : Pat<(i64 (fp_to_sint f32:$a)),
           (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_sint Float32Regs:$a)),
+def : Pat<(i64 (fp_to_sint f32:$a)),
           (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
 
 // f32 -> uint
-def : Pat<(i1 (fp_to_uint Float32Regs:$a)),
+def : Pat<(i1 (fp_to_uint f32:$a)),
           (SETP_b32ri (BITCONVERT_32_F2I Float32Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+def : Pat<(i16 (fp_to_uint f32:$a)),
           (CVT_u16_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i16 (fp_to_uint Float32Regs:$a)),
+def : Pat<(i16 (fp_to_uint f32:$a)),
           (CVT_u16_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+def : Pat<(i32 (fp_to_uint f32:$a)),
           (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i32 (fp_to_uint Float32Regs:$a)),
+def : Pat<(i32 (fp_to_uint f32:$a)),
           (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+def : Pat<(i64 (fp_to_uint f32:$a)),
           (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(i64 (fp_to_uint Float32Regs:$a)),
+def : Pat<(i64 (fp_to_uint f32:$a)),
           (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
 
 // f64 -> sint
-def : Pat<(i1 (fp_to_sint Float64Regs:$a)),
+def : Pat<(i1 (fp_to_sint f64:$a)),
           (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_sint Float64Regs:$a)),
+def : Pat<(i16 (fp_to_sint f64:$a)),
           (CVT_s16_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_sint Float64Regs:$a)),
+def : Pat<(i32 (fp_to_sint f64:$a)),
           (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_sint Float64Regs:$a)),
+def : Pat<(i64 (fp_to_sint f64:$a)),
           (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
 
 // f64 -> uint
-def : Pat<(i1 (fp_to_uint Float64Regs:$a)),
+def : Pat<(i1 (fp_to_uint f64:$a)),
           (SETP_b64ri (BITCONVERT_64_F2I Float64Regs:$a), 0, CmpEQ)>;
-def : Pat<(i16 (fp_to_uint Float64Regs:$a)),
+def : Pat<(i16 (fp_to_uint f64:$a)),
           (CVT_u16_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i32 (fp_to_uint Float64Regs:$a)),
+def : Pat<(i32 (fp_to_uint f64:$a)),
           (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(i64 (fp_to_uint Float64Regs:$a)),
+def : Pat<(i64 (fp_to_uint f64:$a)),
           (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
 
 // sext i1
-def : Pat<(i16 (sext Int1Regs:$a)),
+def : Pat<(i16 (sext i1:$a)),
           (SELP_s16ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (sext Int1Regs:$a)),
+def : Pat<(i32 (sext i1:$a)),
           (SELP_s32ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (sext Int1Regs:$a)),
+def : Pat<(i64 (sext i1:$a)),
           (SELP_s64ii -1, 0, Int1Regs:$a)>;
 
 // zext i1
-def : Pat<(i16 (zext Int1Regs:$a)),
+def : Pat<(i16 (zext i1:$a)),
           (SELP_u16ii 1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (zext Int1Regs:$a)),
+def : Pat<(i32 (zext i1:$a)),
           (SELP_u32ii 1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (zext Int1Regs:$a)),
+def : Pat<(i64 (zext i1:$a)),
           (SELP_u64ii 1, 0, Int1Regs:$a)>;
 
 // anyext i1
-def : Pat<(i16 (anyext Int1Regs:$a)),
+def : Pat<(i16 (anyext i1:$a)),
           (SELP_u16ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i32 (anyext Int1Regs:$a)),
+def : Pat<(i32 (anyext i1:$a)),
           (SELP_u32ii -1, 0, Int1Regs:$a)>;
-def : Pat<(i64 (anyext Int1Regs:$a)),
+def : Pat<(i64 (anyext i1:$a)),
           (SELP_u64ii -1, 0, Int1Regs:$a)>;
 
 // sext i16
-def : Pat<(i32 (sext Int16Regs:$a)),
+def : Pat<(i32 (sext i16:$a)),
           (CVT_s32_s16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (sext Int16Regs:$a)),
+def : Pat<(i64 (sext i16:$a)),
           (CVT_s64_s16 Int16Regs:$a, CvtNONE)>;
 
 // zext i16
-def : Pat<(i32 (zext Int16Regs:$a)),
+def : Pat<(i32 (zext i16:$a)),
           (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (zext Int16Regs:$a)),
+def : Pat<(i64 (zext i16:$a)),
           (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
 
 // anyext i16
-def : Pat<(i32 (anyext Int16Regs:$a)),
+def : Pat<(i32 (anyext i16:$a)),
           (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
-def : Pat<(i64 (anyext Int16Regs:$a)),
+def : Pat<(i64 (anyext i16:$a)),
           (CVT_u64_u16 Int16Regs:$a, CvtNONE)>;
 
 // sext i32
-def : Pat<(i64 (sext Int32Regs:$a)),
+def : Pat<(i64 (sext i32:$a)),
           (CVT_s64_s32 Int32Regs:$a, CvtNONE)>;
 
 // zext i32
-def : Pat<(i64 (zext Int32Regs:$a)),
+def : Pat<(i64 (zext i32:$a)),
           (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
 
 // anyext i32
-def : Pat<(i64 (anyext Int32Regs:$a)),
+def : Pat<(i64 (anyext i32:$a)),
           (CVT_u64_u32 Int32Regs:$a, CvtNONE)>;
 
 
 // truncate i64
-def : Pat<(i32 (trunc Int64Regs:$a)),
+def : Pat<(i32 (trunc i64:$a)),
           (CVT_u32_u64 Int64Regs:$a, CvtNONE)>;
-def : Pat<(i16 (trunc Int64Regs:$a)),
+def : Pat<(i16 (trunc i64:$a)),
           (CVT_u16_u64 Int64Regs:$a, CvtNONE)>;
-def : Pat<(i1 (trunc Int64Regs:$a)),
+def : Pat<(i1 (trunc i64:$a)),
           (SETP_b64ri (ANDb64ri Int64Regs:$a, 1), 1, CmpEQ)>;
 
 // truncate i32
-def : Pat<(i16 (trunc Int32Regs:$a)),
+def : Pat<(i16 (trunc i32:$a)),
           (CVT_u16_u32 Int32Regs:$a, CvtNONE)>;
-def : Pat<(i1 (trunc Int32Regs:$a)),
+def : Pat<(i1 (trunc i32:$a)),
           (SETP_b32ri (ANDb32ri Int32Regs:$a, 1), 1, CmpEQ)>;
 
 // truncate i16
-def : Pat<(i1 (trunc Int16Regs:$a)),
+def : Pat<(i1 (trunc i16:$a)),
           (SETP_b16ri (ANDb16ri Int16Regs:$a, 1), 1, CmpEQ)>;
 
 // sext_inreg
-def : Pat<(sext_inreg Int16Regs:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
-def : Pat<(sext_inreg Int32Regs:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
-def : Pat<(sext_inreg Int32Regs:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
-def : Pat<(sext_inreg Int64Regs:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
+def : Pat<(sext_inreg i16:$a, i8), (CVT_INREG_s16_s8 Int16Regs:$a)>;
+def : Pat<(sext_inreg i32:$a, i8), (CVT_INREG_s32_s8 Int32Regs:$a)>;
+def : Pat<(sext_inreg i32:$a, i16), (CVT_INREG_s32_s16 Int32Regs:$a)>;
+def : Pat<(sext_inreg i64:$a, i8), (CVT_INREG_s64_s8 Int64Regs:$a)>;
+def : Pat<(sext_inreg i64:$a, i16), (CVT_INREG_s64_s16 Int64Regs:$a)>;
+def : Pat<(sext_inreg i64:$a, i32), (CVT_INREG_s64_s32 Int64Regs:$a)>;
 
 
 // Select instructions with 32-bit predicates
-def : Pat<(select (i32 Int32Regs:$pred), i16:$a, i16:$b),
+def : Pat<(select i32:$pred, i16:$a, i16:$b),
           (SELP_b16rr Int16Regs:$a, Int16Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select (i32 Int32Regs:$pred), i32:$a, i32:$b),
+def : Pat<(select i32:$pred, i32:$a, i32:$b),
           (SELP_b32rr Int32Regs:$a, Int32Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select (i32 Int32Regs:$pred), Int64Regs:$a, Int64Regs:$b),
+def : Pat<(select i32:$pred, i64:$a, i64:$b),
           (SELP_b64rr Int64Regs:$a, Int64Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select (i32 Int32Regs:$pred), (f16 Int16Regs:$a), (f16 Int16Regs:$b)),
+def : Pat<(select i32:$pred, f16:$a, f16:$b),
           (SELP_f16rr Int16Regs:$a, Int16Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select (i32 Int32Regs:$pred), (bf16 Int16Regs:$a), (bf16 Int16Regs:$b)),
+def : Pat<(select i32:$pred, bf16:$a, bf16:$b),
           (SELP_bf16rr Int16Regs:$a, Int16Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select (i32 Int32Regs:$pred), Float32Regs:$a, Float32Regs:$b),
+def : Pat<(select i32:$pred, f32:$a, f32:$b),
           (SELP_f32rr Float32Regs:$a, Float32Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
-def : Pat<(select (i32 Int32Regs:$pred), Float64Regs:$a, Float64Regs:$b),
+def : Pat<(select i32:$pred, f64:$a, f64:$b),
           (SELP_f64rr Float64Regs:$a, Float64Regs:$b,
           (SETP_b32ri (ANDb32ri Int32Regs:$pred, 1), 1, CmpEQ))>;
 
@@ -3464,32 +3464,32 @@ let hasSideEffects = false in {
 
 // Using partial vectorized move produces better SASS code for extraction of
 // upper/lower parts of an integer.
-def : Pat<(i16 (trunc (srl Int32Regs:$s, (i32 16)))),
+def : Pat<(i16 (trunc (srl i32:$s, (i32 16)))),
           (I32toI16H Int32Regs:$s)>;
-def : Pat<(i16 (trunc (sra Int32Regs:$s, (i32 16)))),
+def : Pat<(i16 (trunc (sra i32:$s, (i32 16)))),
           (I32toI16H Int32Regs:$s)>;
-def : Pat<(i32 (trunc (srl Int64Regs:$s, (i32 32)))),
+def : Pat<(i32 (trunc (srl i64:$s, (i32 32)))),
           (I64toI32H Int64Regs:$s)>;
-def : Pat<(i32 (trunc (sra Int64Regs:$s, (i32 32)))),
+def : Pat<(i32 (trunc (sra i64:$s, (i32 32)))),
           (I64toI32H Int64Regs:$s)>;
 
-def: Pat<(i32 (sext (extractelt (v2i16 Int32Regs:$src), 0))),
+def: Pat<(i32 (sext (extractelt v2i16:$src, 0))),
          (CVT_INREG_s32_s16 Int32Regs:$src)>;
 
 foreach vt = [v2f16, v2bf16, v2i16] in {
-def : Pat<(extractelt (vt Int32Regs:$src), 0),
+def : Pat<(extractelt vt:$src, 0),
           (I32toI16L Int32Regs:$src)>;
-def : Pat<(extractelt (vt Int32Regs:$src), 1),
+def : Pat<(extractelt vt:$src, 1),
           (I32toI16H Int32Regs:$src)>;
 }
-def : Pat<(v2f16 (build_vector (f16 Int16Regs:$a), (f16 Int16Regs:$b))),
+def : Pat<(v2f16 (build_vector f16:$a, f16:$b)),
           (V2I16toI32 Int16Regs:$a, Int16Regs:$b)>;
-def : Pat<(v2bf16 (build_vector (bf16 Int16Regs:$a), (bf16 Int16Regs:$b))),
+def : Pat<(v2bf16 (build_vector bf16:$a, bf16:$b)),
           (V2I16toI32 Int16Regs:$a, Int16Regs:$b)>;
-def : Pat<(v2i16 (build_vector (i16 Int16Regs:$a), (i16 Int16Regs:$b))),
+def : Pat<(v2i16 (build_vector i16:$a, i16:$b)),
           (V2I16toI32 Int16Regs:$a, Int16Regs:$b)>;
 
-def: Pat<(v2i16 (scalar_to_vector (i16 Int16Regs:$a))),
+def: Pat<(v2i16 (scalar_to_vector i16:$a)),
          (CVT_u32_u16 Int16Regs:$a, CvtNONE)>;
 
 //
@@ -3509,16 +3509,16 @@ let hasSideEffects = false in {
       : NVPTXInst<(outs Int32Regs:$dst),
                   (ins  Int32Regs:$lo, Int32Regs:$hi, i32imm:$amt),
                   "shf." # mode # ".b32 \t$dst, $lo, $hi, $amt;",
-                  [(set Int32Regs:$dst,
-                      (op (i32 Int32Regs:$hi), (i32 Int32Regs:$lo), (i32 imm:$amt)))]>,
+                  [(set i32:$dst,
+                      (op i32:$hi, i32:$lo, (i32 imm:$amt)))]>,
         Requires<[hasHWROT32]>;
 
     def _r
       : NVPTXInst<(outs Int32Regs:$dst),
                   (ins  Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt),
                   "shf." # mode # ".b32 \t$dst, $lo, $hi, $amt;",
-                  [(set Int32Regs:$dst,
-                      (op (i32 Int32Regs:$hi), (i32 Int32Regs:$lo), (i32 Int32Regs:$amt)))]>,
+                  [(set i32:$dst,
+                      (op i32:$hi, i32:$lo, i32:$amt))]>,
         Requires<[hasHWROT32]>;
   }
 
@@ -3528,14 +3528,14 @@ let hasSideEffects = false in {
   defm SHF_R_WRAP  : ShfInst<"r.wrap", fshr>;
 }
 
-def : Pat<(i32 (int_nvvm_fshl_clamp (i32 Int32Regs:$hi), (i32 Int32Regs:$lo), (i32 Int32Regs:$amt))),
-          (SHF_L_CLAMP_r (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 Int32Regs:$amt))>;
-def : Pat<(i32 (int_nvvm_fshl_clamp (i32 Int32Regs:$hi), (i32 Int32Regs:$lo), (i32 imm:$amt))),
-          (SHF_L_CLAMP_i (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 imm:$amt))>;
-def : Pat<(i32 (int_nvvm_fshr_clamp (i32 Int32Regs:$hi), (i32 Int32Regs:$lo), (i32 Int32Regs:$amt))),
-          (SHF_R_CLAMP_r (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 Int32Regs:$amt))>;
-def : Pat<(i32 (int_nvvm_fshr_clamp (i32 Int32Regs:$hi), (i32 Int32Regs:$lo), (i32 imm:$amt))),
-          (SHF_R_CLAMP_i (i32 Int32Regs:$lo), (i32 Int32Regs:$hi), (i32 imm:$amt))>;
+def : Pat<(i32 (int_nvvm_fshl_clamp i32:$hi, i32:$lo, i32:$amt)),
+          (SHF_L_CLAMP_r Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt)>;
+def : Pat<(i32 (int_nvvm_fshl_clamp i32:$hi, i32:$lo, (i32 imm:$amt))),
+          (SHF_L_CLAMP_i Int32Regs:$lo, Int32Regs:$hi, imm:$amt)>;
+def : Pat<(i32 (int_nvvm_fshr_clamp i32:$hi, i32:$lo, i32:$amt)),
+          (SHF_R_CLAMP_r Int32Regs:$lo, Int32Regs:$hi, Int32Regs:$amt)>;
+def : Pat<(i32 (int_nvvm_fshr_clamp i32:$hi, i32:$lo, (i32 imm:$amt))),
+          (SHF_R_CLAMP_i Int32Regs:$lo, Int32Regs:$hi, imm:$amt)>;
 
 // Count leading zeros
 let hasSideEffects = false in {
@@ -3546,14 +3546,14 @@ let hasSideEffects = false in {
 }
 
 // 32-bit has a direct PTX instruction
-def : Pat<(i32 (ctlz (i32 Int32Regs:$a))), (CLZr32 Int32Regs:$a)>;
+def : Pat<(i32 (ctlz i32:$a)), (CLZr32 i32:$a)>;
 
 // The return type of the ctlz ISD node is the same as its input, but the PTX
 // ctz instruction always returns a 32-bit value.  For ctlz.i64, convert the
 // ptx value to 64 bits to match the ISD node's semantics, unless we know we're
 // truncating back down to 32 bits.
-def : Pat<(i64 (ctlz Int64Regs:$a)), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
-def : Pat<(i32 (trunc (i64 (ctlz Int64Regs:$a)))), (CLZr64 Int64Regs:$a)>;
+def : Pat<(i64 (ctlz i64:$a)), (CVT_u64_u32 (CLZr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (i64 (ctlz i64:$a)))), (CLZr64 Int64Regs:$a)>;
 
 // For 16-bit ctlz, we zero-extend to 32-bit, perform the count, then trunc the
 // result back to 16-bits if necessary.  We also need to subtract 16 because
@@ -3569,10 +3569,10 @@ def : Pat<(i32 (trunc (i64 (ctlz Int64Regs:$a)))), (CLZr64 Int64Regs:$a)>;
 // and then ctlz that value.  This way we don't have to subtract 16 from the
 // result.  Unfortunately today we don't have a way to generate
 // "mov b32reg, {b16imm, b16reg}", so we don't do this optimization.
-def : Pat<(i16 (ctlz Int16Regs:$a)),
+def : Pat<(i16 (ctlz i16:$a)),
           (SUBi16ri (CVT_u16_u32
            (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE), 16)>;
-def : Pat<(i32 (zext (i16 (ctlz Int16Regs:$a)))),
+def : Pat<(i32 (zext (i16 (ctlz i16:$a)))),
           (SUBi32ri (CLZr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), 16)>;
 
 // Population count
@@ -3584,66 +3584,66 @@ let hasSideEffects = false in {
 }
 
 // 32-bit has a direct PTX instruction
-def : Pat<(i32 (ctpop (i32 Int32Regs:$a))), (POPCr32 Int32Regs:$a)>;
+def : Pat<(i32 (ctpop i32:$a)), (POPCr32 Int32Regs:$a)>;
 
 // For 64-bit, the result in PTX is actually 32-bit so we zero-extend to 64-bit
 // to match the LLVM semantics.  Just as with ctlz.i64, we provide a second
 // pattern that avoids the type conversion if we're truncating the result to
 // i32 anyway.
-def : Pat<(ctpop Int64Regs:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
-def : Pat<(i32 (trunc (i64 (ctpop Int64Regs:$a)))), (POPCr64 Int64Regs:$a)>;
+def : Pat<(ctpop i64:$a), (CVT_u64_u32 (POPCr64 Int64Regs:$a), CvtNONE)>;
+def : Pat<(i32 (trunc (i64 (ctpop i64:$a)))), (POPCr64 Int64Regs:$a)>;
 
 // For 16-bit, we zero-extend to 32-bit, then trunc the result back to 16-bits.
 // If we know that we're storing into an i32, we can avoid the final trunc.
-def : Pat<(ctpop Int16Regs:$a),
+def : Pat<(ctpop i16:$a),
           (CVT_u16_u32 (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE)), CvtNONE)>;
-def : Pat<(i32 (zext (i16 (ctpop Int16Regs:$a)))),
+def : Pat<(i32 (zext (i16 (ctpop i16:$a)))),
           (POPCr32 (CVT_u32_u16 Int16Regs:$a, CvtNONE))>;
 
 // fpround f32 -> f16
-def : Pat<(f16 (fpround Float32Regs:$a)),
+def : Pat<(f16 (fpround f32:$a)),
           (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
 
 // fpround f32 -> bf16
-def : Pat<(bf16 (fpround Float32Regs:$a)),
+def : Pat<(bf16 (fpround f32:$a)),
           (CVT_bf16_f32 Float32Regs:$a, CvtRN)>, Requires<[hasPTX<70>, hasSM<80>]>;
 
 // fpround f64 -> f16
-def : Pat<(f16 (fpround Float64Regs:$a)),
+def : Pat<(f16 (fpround f64:$a)),
           (CVT_f16_f64 Float64Regs:$a, CvtRN)>;
 
 // fpround f64 -> bf16
-def : Pat<(bf16 (fpround Float64Regs:$a)),
+def : Pat<(bf16 (fpround f64:$a)),
           (CVT_bf16_f64 Float64Regs:$a, CvtRN)>, Requires<[hasPTX<78>, hasSM<90>]>;
 // fpround f64 -> f32
-def : Pat<(f32 (fpround Float64Regs:$a)),
+def : Pat<(f32 (fpround f64:$a)),
           (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fpround Float64Regs:$a)),
+def : Pat<(f32 (fpround f64:$a)),
           (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
 
 // fpextend f16 -> f32
-def : Pat<(f32 (fpextend (f16 Int16Regs:$a))),
+def : Pat<(f32 (fpextend f16:$a)),
           (CVT_f32_f16 Int16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fpextend (f16 Int16Regs:$a))),
+def : Pat<(f32 (fpextend f16:$a)),
           (CVT_f32_f16 Int16Regs:$a, CvtNONE)>;
 // fpextend bf16 -> f32
-def : Pat<(f32 (fpextend (bf16 Int16Regs:$a))),
+def : Pat<(f32 (fpextend bf16:$a)),
           (CVT_f32_bf16 Int16Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f32 (fpextend (bf16 Int16Regs:$a))),
+def : Pat<(f32 (fpextend bf16:$a)),
           (CVT_f32_bf16 Int16Regs:$a, CvtNONE)>, Requires<[hasPTX<71>, hasSM<80>]>;
 
 // fpextend f16 -> f64
-def : Pat<(f64 (fpextend (f16 Int16Regs:$a))),
+def : Pat<(f64 (fpextend f16:$a)),
           (CVT_f64_f16 Int16Regs:$a, CvtNONE)>;
 
 // fpextend bf16 -> f64
-def : Pat<(f64 (fpextend (bf16 Int16Regs:$a))),
+def : Pat<(f64 (fpextend bf16:$a)),
           (CVT_f64_bf16 Int16Regs:$a, CvtNONE)>, Requires<[hasPTX<78>, hasSM<90>]>;
 
 // fpextend f32 -> f64
-def : Pat<(f64 (fpextend Float32Regs:$a)),
+def : Pat<(f64 (fpextend f32:$a)),
           (CVT_f64_f32 Float32Regs:$a, CvtNONE_FTZ)>, Requires<[doF32FTZ]>;
-def : Pat<(f64 (fpextend Float32Regs:$a)),
+def : Pat<(f64 (fpextend f32:$a)),
           (CVT_f64_f32 Float32Regs:$a, CvtNONE)>;
 
 def retglue : SDNode<"NVPTXISD::RET_GLUE", SDTNone,
@@ -3652,15 +3652,15 @@ def retglue : SDNode<"NVPTXISD::RET_GLUE", SDTNone,
 // fceil, ffloor, froundeven, ftrunc.
 
 multiclass CVT_ROUND<SDNode OpNode, PatLeaf Mode, PatLeaf ModeFTZ> {
-  def : Pat<(OpNode (f16 Int16Regs:$a)),
+  def : Pat<(OpNode f16:$a),
             (CVT_f16_f16 Int16Regs:$a, Mode)>;
-  def : Pat<(OpNode (bf16 Int16Regs:$a)),
+  def : Pat<(OpNode bf16:$a),
             (CVT_bf16_bf16 Int16Regs:$a, Mode)>;
-  def : Pat<(OpNode Float32Regs:$a),
+  def : Pat<(OpNode f32:$a),
             (CVT_f32_f32 Float32Regs:$a, ModeFTZ)>, Requires<[doF32FTZ]>;
-  def : Pat<(OpNode Float32Regs:$a),
+  def : Pat<(OpNode f32:$a),
             (CVT_f32_f32 Float32Regs:$a, Mode)>, Requires<[doNoF32FTZ]>;
-  def : Pat<(OpNode Float64Regs:$a),
+  def : Pat<(OpNode f64:$a),
             (CVT_f64_f64 Float64Regs:$a, Mode)>;
 }
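
As a usage sketch, CVT_ROUND is meant to be instantiated once per rounding operation; the leaf names below are an assumption based on the CvtRZI/CvtRZI_FTZ leaves used earlier in this file, not something these hunks show:

  // fceil rounds toward +infinity, so it pairs with the round-to-plus-infinity
  // cvt modifier and its flush-to-zero variant for f32 under doF32FTZ.
  defm : CVT_ROUND<fceil, CvtRPI, CvtRPI_FTZ>;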
 
@@ -3687,7 +3687,7 @@ let isTerminator=1 in {
    let isBranch=1 in
       def CBranch : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
                               "@$a bra \t$target;",
-                              [(brcond Int1Regs:$a, bb:$target)]>;
+                              [(brcond i1:$a, bb:$target)]>;
    let isBranch=1 in
       def CBranchOther : NVPTXInst<(outs), (ins Int1Regs:$a, brtarget:$target),
                                    "@!$a bra \t$target;", []>;
@@ -3697,7 +3697,7 @@ let isTerminator=1 in {
                            "bra.uni \t$target;", [(br bb:$target)]>;
 }
 
-def : Pat<(brcond (i32 Int32Regs:$a), bb:$target),
+def : Pat<(brcond i32:$a, bb:$target),
           (CBranch (SETP_u32ri Int32Regs:$a, 0, CmpNE), bb:$target)>;
 
 // SelectionDAGBuilder::visitSWitchCase() will invert the condition of a
@@ -3705,8 +3705,8 @@ def : Pat<(brcond (i32 Int32Regs:$a), bb:$target),
 // can fall through to the target block.  The inversion is done by 'xor
 // condition, 1', which will be translated to (setne condition, -1).  Since ptx
 // supports '@!pred bra target', we should use it.
-def : Pat<(brcond (i1 (setne Int1Regs:$a, -1)), bb:$target),
-          (CBranchOther Int1Regs:$a, bb:$target)>;
+def : Pat<(brcond (i1 (setne i1:$a, -1)), bb:$target),
+          (CBranchOther i1:$a, bb:$target)>;
 
 // Call
 def SDT_NVPTXCallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
@@ -3775,7 +3775,7 @@ def DYNAMIC_STACKALLOC32 :
             (ins Int32Regs:$size, i32imm:$align),
             "alloca.u32 \t$ptr, $size, $align;\n\t"
             "cvta.local.u32 \t$ptr, $ptr;",
-            [(set (i32 Int32Regs:$ptr), (dyn_alloca Int32Regs:$size, (i32 timm:$align)))]>,
+            [(set i32:$ptr, (dyn_alloca i32:$size, (i32 timm:$align)))]>,
             Requires<[hasPTX<73>, hasSM<52>]>;
 
 def DYNAMIC_STACKALLOC64 :
@@ -3783,7 +3783,7 @@ def DYNAMIC_STACKALLOC64 :
             (ins Int64Regs:$size, i32imm:$align),
             "alloca.u64 \t$ptr, $size, $align;\n\t"
             "cvta.local.u64 \t$ptr, $ptr;",
-            [(set Int64Regs:$ptr, (dyn_alloca Int64Regs:$size, (i32 timm:$align)))]>,
+            [(set i64:$ptr, (dyn_alloca i64:$size, (i32 timm:$align)))]>,
             Requires<[hasPTX<73>, hasSM<52>]>;
 
 
@@ -3820,7 +3820,7 @@ let isTerminator = 1, isBranch = 1, isIndirectBranch = 1, isNotDuplicable = 1 in
   def BRX_END :
     NVPTXInst<(outs), (ins brtarget:$target, Int32Regs:$val, i32imm:$id),
               "\t$target;\n\tbrx.idx \t$val, $$L_brx_$id;",
-              [(brx_end bb:$target, (i32 Int32Regs:$val), (i32 imm:$id))]> {
+              [(brx_end bb:$target, i32:$val, (i32 imm:$id))]> {
       let isBarrier = 1;
     }
 }
@@ -3833,9 +3833,9 @@ foreach a_type = ["s", "u"] in {
       NVPTXInst<(outs Int32Regs:$dst),
                 (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
                 "dp4a." # a_type # "32." # b_type # "32 \t$dst, $a, $b, $c;",
-                [(set Int32Regs:$dst,
+                [(set i32:$dst,
                     (!cast<Intrinsic>("int_nvvm_idp4a_" # a_type # "_" # b_type)
-                     (i32 Int32Regs:$a), (i32 Int32Regs:$b), (i32 Int32Regs:$c)))]>,
+                     i32:$a, i32:$b, i32:$c))]>,
                 Requires<[hasDotInstructions]>;
 
     foreach is_hi = [0, -1] in {
@@ -3845,9 +3845,9 @@ foreach a_type = ["s", "u"] in {
         NVPTXInst<(outs Int32Regs:$dst),
                   (ins Int32Regs:$a, Int32Regs:$b, Int32Regs:$c),
                   "dp2a." # lohi_suffix # "." # a_type # "32." # b_type # "32 \t$dst, $a, $b, $c;",
-                  [(set Int32Regs:$dst,
+                  [(set i32:$dst,
                       (!cast<Intrinsic>("int_nvvm_idp2a_" # a_type # "_" # b_type)
-                       (i32 Int32Regs:$a), (i32 Int32Regs:$b), is_hi, (i32 Int32Regs:$c)))]>,
+                       i32:$a, i32:$b, is_hi, i32:$c))]>,
                   Requires<[hasDotInstructions]>;
     }
   }
@@ -3870,25 +3870,25 @@ def stacksave :
 def STACKRESTORE_32 :
   NVPTXInst<(outs), (ins Int32Regs:$ptr),
             "stackrestore.u32 \t$ptr;",
-            [(stackrestore (i32 Int32Regs:$ptr))]>,
+            [(stackrestore i32:$ptr)]>,
             Requires<[hasPTX<73>, hasSM<52>]>;
 
 def STACKSAVE_32 :
   NVPTXInst<(outs Int32Regs:$dst), (ins),
             "stacksave.u32 \t$dst;",
-            [(set Int32Regs:$dst, (i32 stacksave))]>,
+            [(set i32:$dst, (i32 stacksave))]>,
             Requires<[hasPTX<73>, hasSM<52>]>;
 
 def STACKRESTORE_64 :
   NVPTXInst<(outs), (ins Int64Regs:$ptr),
             "stackrestore.u64 \t$ptr;",
-            [(stackrestore (i64 Int64Regs:$ptr))]>,
+            [(stackrestore i64:$ptr)]>,
             Requires<[hasPTX<73>, hasSM<52>]>;
 
 def STACKSAVE_64 :
   NVPTXInst<(outs Int64Regs:$dst), (ins),
             "stacksave.u64 \t$dst;",
-            [(set Int64Regs:$dst, (i64 stacksave))]>,
+            [(set i64:$dst, (i64 stacksave))]>,
             Requires<[hasPTX<73>, hasSM<52>]>;
 
 include "NVPTXIntrinsics.td"
@@ -3983,19 +3983,19 @@ def FMARELU_F16X2_FTZ : NVPTXInst_rrr<Int32Regs, "fma.rn.ftz.relu.f16x2", [useFP
 def FMARELU_BF16X2 : NVPTXInst_rrr<Int32Regs, "fma.rn.relu.bf16x2", [hasBF16Math, hasPTX<70>, hasSM<80>]>;
 
 // FTZ
-def : Pat<(f16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan Int16Regs:$a, Int16Regs:$b, Int16Regs:$c), fpimm_any_zero)),
+def : Pat<(f16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan f16:$a, f16:$b, f16:$c), fpimm_any_zero)),
   (FMARELU_F16_FTZ Int16Regs:$a, Int16Regs:$b, Int16Regs:$c)>,
   Requires<[doF32FTZ]>;
-def : Pat<(v2f16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan Int32Regs:$a, Int32Regs:$b, Int32Regs:$c), fpimm_positive_zero_v2f16)),
+def : Pat<(v2f16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan v2f16:$a, v2f16:$b, v2f16:$c), fpimm_positive_zero_v2f16)),
   (FMARELU_F16X2_FTZ Int32Regs:$a, Int32Regs:$b, Int32Regs:$c)>,
   Requires<[doF32FTZ]>;
 
 // NO FTZ
-def : Pat<(f16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan Int16Regs:$a, Int16Regs:$b, Int16Regs:$c), fpimm_any_zero)),
+def : Pat<(f16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan f16:$a, f16:$b, f16:$c), fpimm_any_zero)),
   (FMARELU_F16 Int16Regs:$a, Int16Regs:$b, Int16Regs:$c)>;
-def : Pat<(bf16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan Int16Regs:$a, Int16Regs:$b, Int16Regs:$c), fpimm_any_zero)),
+def : Pat<(bf16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan bf16:$a, bf16:$b, bf16:$c), fpimm_any_zero)),
   (FMARELU_BF16 Int16Regs:$a, Int16Regs:$b, Int16Regs:$c)>;
-def : Pat<(v2f16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan Int32Regs:$a, Int32Regs:$b, Int32Regs:$c), fpimm_positive_zero_v2f16)),
+def : Pat<(v2f16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan v2f16:$a, v2f16:$b, v2f16:$c), fpimm_positive_zero_v2f16)),
   (FMARELU_F16X2 Int32Regs:$a, Int32Regs:$b, Int32Regs:$c)>;
-def : Pat<(v2bf16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan Int32Regs:$a, Int32Regs:$b, Int32Regs:$c), fpimm_positive_zero_v2bf16)),
+def : Pat<(v2bf16 (NVPTX_fmaxnum_nsz (NVPTX_fma_oneuse_and_nnan v2bf16:$a, v2bf16:$b, v2bf16:$c), fpimm_positive_zero_v2bf16)),
   (FMARELU_BF16X2 Int32Regs:$a, Int32Regs:$b, Int32Regs:$c)>;

diff --git a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
index 8364b658495c75..256161d5d79c77 100644
--- a/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
+++ b/llvm/lib/Target/NVPTX/NVPTXIntrinsics.td
@@ -82,17 +82,17 @@ def INT_BARRIER0 : NVPTXInst<(outs), (ins),
       [(int_nvvm_barrier0)]>;
 def INT_BARRIERN : NVPTXInst<(outs), (ins Int32Regs:$src1),
                   "bar.sync \t$src1;",
-      [(int_nvvm_barrier_n Int32Regs:$src1)]>;
+      [(int_nvvm_barrier_n i32:$src1)]>;
 def INT_BARRIER : NVPTXInst<(outs), (ins Int32Regs:$src1, Int32Regs:$src2),
                   "bar.sync \t$src1, $src2;",
-      [(int_nvvm_barrier Int32Regs:$src1, Int32Regs:$src2)]>;
+      [(int_nvvm_barrier i32:$src1, i32:$src2)]>;
 def INT_BARRIER0_POPC : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$pred),
   !strconcat("{{ \n\t",
              ".reg .pred \t%p1; \n\t",
              "setp.ne.u32 \t%p1, $pred, 0; \n\t",
              "bar.red.popc.u32 \t$dst, 0, %p1; \n\t",
              "}}"),
-      [(set Int32Regs:$dst, (int_nvvm_barrier0_popc Int32Regs:$pred))]>;
+      [(set i32:$dst, (int_nvvm_barrier0_popc i32:$pred))]>;
 def INT_BARRIER0_AND : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$pred),
   !strconcat("{{ \n\t",
              ".reg .pred \t%p1; \n\t",
@@ -101,7 +101,7 @@ def INT_BARRIER0_AND : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$pred),
              "bar.red.and.pred \t%p2, 0, %p1; \n\t",
              "selp.u32 \t$dst, 1, 0, %p2; \n\t",
              "}}"),
-      [(set Int32Regs:$dst, (int_nvvm_barrier0_and Int32Regs:$pred))]>;
+      [(set i32:$dst, (int_nvvm_barrier0_and i32:$pred))]>;
 def INT_BARRIER0_OR : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$pred),
   !strconcat("{{ \n\t",
              ".reg .pred \t%p1; \n\t",
@@ -110,7 +110,7 @@ def INT_BARRIER0_OR : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$pred),
              "bar.red.or.pred \t%p2, 0, %p1; \n\t",
              "selp.u32 \t$dst, 1, 0, %p2; \n\t",
              "}}"),
-      [(set Int32Regs:$dst, (int_nvvm_barrier0_or Int32Regs:$pred))]>;
+      [(set i32:$dst, (int_nvvm_barrier0_or i32:$pred))]>;
 
 def INT_BAR_SYNC : NVPTXInst<(outs), (ins i32imm:$i), "bar.sync \t$i;",
                              [(int_nvvm_bar_sync imm:$i)]>;
@@ -119,27 +119,27 @@ def INT_BAR_WARP_SYNC_I : NVPTXInst<(outs), (ins i32imm:$i), "bar.warp.sync \t$i
                              [(int_nvvm_bar_warp_sync imm:$i)]>,
         Requires<[hasPTX<60>, hasSM<30>]>;
 def INT_BAR_WARP_SYNC_R : NVPTXInst<(outs), (ins Int32Regs:$i), "bar.warp.sync \t$i;",
-                             [(int_nvvm_bar_warp_sync Int32Regs:$i)]>,
+                             [(int_nvvm_bar_warp_sync i32:$i)]>,
         Requires<[hasPTX<60>, hasSM<30>]>;
 
 def INT_BARRIER_SYNC_I : NVPTXInst<(outs), (ins i32imm:$i), "barrier.sync \t$i;",
                                    [(int_nvvm_barrier_sync imm:$i)]>,
         Requires<[hasPTX<60>, hasSM<30>]>;
 def INT_BARRIER_SYNC_R : NVPTXInst<(outs), (ins Int32Regs:$i), "barrier.sync \t$i;",
-                                   [(int_nvvm_barrier_sync Int32Regs:$i)]>,
+                                   [(int_nvvm_barrier_sync i32:$i)]>,
         Requires<[hasPTX<60>, hasSM<30>]>;
 
 def INT_BARRIER_SYNC_CNT_RR : NVPTXInst<(outs), (ins Int32Regs:$id, Int32Regs:$cnt),
                  "barrier.sync \t$id, $cnt;",
-                 [(int_nvvm_barrier_sync_cnt Int32Regs:$id, Int32Regs:$cnt)]>,
+                 [(int_nvvm_barrier_sync_cnt i32:$id, i32:$cnt)]>,
         Requires<[hasPTX<60>, hasSM<30>]>;
 def INT_BARRIER_SYNC_CNT_RI : NVPTXInst<(outs), (ins Int32Regs:$id, i32imm:$cnt),
                  "barrier.sync \t$id, $cnt;",
-                 [(int_nvvm_barrier_sync_cnt Int32Regs:$id, imm:$cnt)]>,
+                 [(int_nvvm_barrier_sync_cnt i32:$id, imm:$cnt)]>,
         Requires<[hasPTX<60>, hasSM<30>]>;
 def INT_BARRIER_SYNC_CNT_IR : NVPTXInst<(outs), (ins i32imm:$id, Int32Regs:$cnt),
                  "barrier.sync \t$id, $cnt;",
-                 [(int_nvvm_barrier_sync_cnt imm:$id, Int32Regs:$cnt)]>,
+                 [(int_nvvm_barrier_sync_cnt imm:$id, i32:$cnt)]>,
         Requires<[hasPTX<60>, hasSM<30>]>;
 def INT_BARRIER_SYNC_CNT_II : NVPTXInst<(outs), (ins i32imm:$id, i32imm:$cnt),
                  "barrier.sync \t$id, $cnt;",
@@ -230,7 +230,7 @@ foreach sync = [false, true] in {
 multiclass VOTE<NVPTXRegClass regclass, string mode, Intrinsic IntOp> {
   def : NVPTXInst<(outs regclass:$dest), (ins Int1Regs:$pred),
               "vote." # mode # " \t$dest, $pred;",
-              [(set regclass:$dest, (IntOp Int1Regs:$pred))]>,
+              [(set regclass:$dest, (IntOp i1:$pred))]>,
         Requires<[hasPTX<60>, hasSM<30>]>;
 }
 
@@ -243,11 +243,11 @@ defm VOTE_BALLOT : VOTE<Int32Regs, "ballot.b32", int_nvvm_vote_ballot>;
 multiclass VOTE_SYNC<NVPTXRegClass regclass, string mode, Intrinsic IntOp> {
   def i : NVPTXInst<(outs regclass:$dest), (ins i32imm:$mask, Int1Regs:$pred),
               "vote.sync." # mode # " \t$dest, $pred, $mask;",
-              [(set regclass:$dest, (IntOp imm:$mask, Int1Regs:$pred))]>,
+              [(set regclass:$dest, (IntOp imm:$mask, i1:$pred))]>,
           Requires<[hasPTX<60>, hasSM<30>]>;
   def r : NVPTXInst<(outs regclass:$dest), (ins Int32Regs:$mask, Int1Regs:$pred),
               "vote.sync." # mode #" \t$dest, $pred, $mask;",
-              [(set regclass:$dest, (IntOp Int32Regs:$mask, Int1Regs:$pred))]>,
+              [(set regclass:$dest, (IntOp i32:$mask, i1:$pred))]>,
           Requires<[hasPTX<60>, hasSM<30>]>;
 }
 
@@ -259,37 +259,37 @@ defm VOTE_SYNC_BALLOT : VOTE_SYNC<Int32Regs, "ballot.b32", int_nvvm_vote_ballot_
 // elect.sync
 def INT_ELECT_SYNC_I : NVPTXInst<(outs Int32Regs:$dest, Int1Regs:$pred), (ins i32imm:$mask),
             "elect.sync \t$dest|$pred, $mask;",
-            [(set Int32Regs:$dest, Int1Regs:$pred, (int_nvvm_elect_sync imm:$mask))]>,
+            [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync imm:$mask))]>,
             Requires<[hasPTX<80>, hasSM<90>]>;
 def INT_ELECT_SYNC_R : NVPTXInst<(outs Int32Regs:$dest, Int1Regs:$pred), (ins Int32Regs:$mask),
             "elect.sync \t$dest|$pred, $mask;",
-            [(set Int32Regs:$dest, Int1Regs:$pred, (int_nvvm_elect_sync Int32Regs:$mask))]>,
+            [(set i32:$dest, i1:$pred, (int_nvvm_elect_sync i32:$mask))]>,
             Requires<[hasPTX<80>, hasSM<90>]>;
 
 multiclass MATCH_ANY_SYNC<NVPTXRegClass regclass, string ptxtype, Intrinsic IntOp,
                           Operand ImmOp> {
   def ii : NVPTXInst<(outs Int32Regs:$dest), (ins i32imm:$mask, ImmOp:$value),
               "match.any.sync." # ptxtype # " \t$dest, $value, $mask;",
-              [(set Int32Regs:$dest, (IntOp imm:$mask, imm:$value))]>,
+              [(set i32:$dest, (IntOp imm:$mask, imm:$value))]>,
            Requires<[hasPTX<60>, hasSM<70>]>;
   def ir : NVPTXInst<(outs Int32Regs:$dest), (ins Int32Regs:$mask, ImmOp:$value),
               "match.any.sync." # ptxtype # " \t$dest, $value, $mask;",
-              [(set Int32Regs:$dest, (IntOp Int32Regs:$mask, imm:$value))]>,
+              [(set i32:$dest, (IntOp i32:$mask, imm:$value))]>,
            Requires<[hasPTX<60>, hasSM<70>]>;
   def ri : NVPTXInst<(outs Int32Regs:$dest), (ins i32imm:$mask, regclass:$value),
               "match.any.sync." # ptxtype # " \t$dest, $value, $mask;",
-              [(set Int32Regs:$dest, (IntOp imm:$mask, regclass:$value))]>,
+              [(set i32:$dest, (IntOp imm:$mask, regclass:$value))]>,
            Requires<[hasPTX<60>, hasSM<70>]>;
   def rr : NVPTXInst<(outs Int32Regs:$dest), (ins Int32Regs:$mask, regclass:$value),
               "match.any.sync." # ptxtype # " \t$dest, $value, $mask;",
-              [(set Int32Regs:$dest, (IntOp Int32Regs:$mask, regclass:$value))]>,
+              [(set i32:$dest, (IntOp i32:$mask, regclass:$value))]>,
            Requires<[hasPTX<60>, hasSM<70>]>;
 }
 
 // activemask.b32
 def ACTIVEMASK : NVPTXInst<(outs Int32Regs:$dest), (ins),
                     "activemask.b32 \t$dest;",
-                    [(set Int32Regs:$dest, (int_nvvm_activemask))]>,
+                    [(set i32:$dest, (int_nvvm_activemask))]>,
                  Requires<[hasPTX<62>, hasSM<30>]>;
 
 defm MATCH_ANY_SYNC_32 : MATCH_ANY_SYNC<Int32Regs, "b32", int_nvvm_match_any_sync_i32,
@@ -302,22 +302,22 @@ multiclass MATCH_ALLP_SYNC<NVPTXRegClass regclass, string ptxtype, Intrinsic Int
   def ii : NVPTXInst<(outs Int32Regs:$dest, Int1Regs:$pred),
                      (ins i32imm:$mask, ImmOp:$value),
               "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;",
-              [(set Int32Regs:$dest, Int1Regs:$pred, (IntOp imm:$mask, imm:$value))]>,
+              [(set i32:$dest, i1:$pred, (IntOp imm:$mask, imm:$value))]>,
            Requires<[hasPTX<60>, hasSM<70>]>;
   def ir : NVPTXInst<(outs Int32Regs:$dest, Int1Regs:$pred),
                      (ins Int32Regs:$mask, ImmOp:$value),
               "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;",
-              [(set Int32Regs:$dest, Int1Regs:$pred, (IntOp Int32Regs:$mask, imm:$value))]>,
+              [(set i32:$dest, i1:$pred, (IntOp i32:$mask, imm:$value))]>,
            Requires<[hasPTX<60>, hasSM<70>]>;
   def ri : NVPTXInst<(outs Int32Regs:$dest, Int1Regs:$pred),
                      (ins i32imm:$mask, regclass:$value),
               "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;",
-              [(set Int32Regs:$dest, Int1Regs:$pred, (IntOp imm:$mask, regclass:$value))]>,
+              [(set i32:$dest, i1:$pred, (IntOp imm:$mask, regclass:$value))]>,
            Requires<[hasPTX<60>, hasSM<70>]>;
   def rr : NVPTXInst<(outs Int32Regs:$dest, Int1Regs:$pred),
                      (ins Int32Regs:$mask, regclass:$value),
               "match.all.sync." # ptxtype # " \t$dest|$pred, $value, $mask;",
-              [(set Int32Regs:$dest, Int1Regs:$pred, (IntOp Int32Regs:$mask, regclass:$value))]>,
+              [(set i32:$dest, i1:$pred, (IntOp i32:$mask, regclass:$value))]>,
            Requires<[hasPTX<60>, hasSM<70>]>;
 }
 defm MATCH_ALLP_SYNC_32 : MATCH_ALLP_SYNC<Int32Regs, "b32", int_nvvm_match_all_sync_i32p,
@@ -328,7 +328,7 @@ defm MATCH_ALLP_SYNC_64 : MATCH_ALLP_SYNC<Int64Regs, "b64", int_nvvm_match_all_s
 multiclass REDUX_SYNC<string BinOp, string PTXType, Intrinsic Intrin> {
   def : NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$src, Int32Regs:$mask),
           "redux.sync." # BinOp # "." # PTXType # " $dst, $src, $mask;",
-          [(set Int32Regs:$dst, (Intrin Int32Regs:$src, Int32Regs:$mask))]>,
+          [(set i32:$dst, (Intrin i32:$src, Int32Regs:$mask))]>,
         Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -384,7 +384,7 @@ def INT_FENCE_PROXY_TENSORMAP_GENERIC_RELEASE_SYS:
 class FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE<string Scope, Intrinsic Intr> :
         NVPTXInst<(outs), (ins Int64Regs:$addr),
                   "fence.proxy.tensormap::generic.acquire." # Scope # " [$addr], 128;",
-                  [(Intr Int64Regs:$addr, (i32 128))]>,
+                  [(Intr i64:$addr, (i32 128))]>,
         Requires<[hasPTX<83>, hasSM<90>]>;
 
 def INT_FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE_CTA :
@@ -407,11 +407,11 @@ def INT_FENCE_PROXY_TENSORMAP_GENERIC_ACQUIRE_SYS :
 multiclass CP_ASYNC_MBARRIER_ARRIVE<string NoInc, string AddrSpace, Intrinsic Intrin> {
   def _32 : NVPTXInst<(outs), (ins Int32Regs:$addr),
             !strconcat("cp.async.mbarrier.arrive", NoInc, AddrSpace, ".b64 [$addr];"),
-            [(Intrin Int32Regs:$addr)]>,
+            [(Intrin i32:$addr)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64 : NVPTXInst<(outs), (ins Int64Regs:$addr),
             !strconcat("cp.async.mbarrier.arrive", NoInc, AddrSpace, ".b64 [$addr];"),
-            [(Intrin Int64Regs:$addr)]>,
+            [(Intrin i64:$addr)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -427,28 +427,28 @@ defm CP_ASYNC_MBARRIER_ARRIVE_NOINC_SHARED :
 multiclass CP_ASYNC_SHARED_GLOBAL_I<string cc, string cpsize, Intrinsic Intrin, Intrinsic IntrinS> {
   def _32 : NVPTXInst<(outs), (ins Int32Regs:$dst, Int32Regs:$src),
             !strconcat("cp.async.", cc, ".shared.global [$dst], [$src], ", cpsize, ";"),
-            [(Intrin Int32Regs:$dst, Int32Regs:$src)]>,
+            [(Intrin i32:$dst, i32:$src)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64 : NVPTXInst<(outs), (ins Int64Regs:$dst, Int64Regs:$src),
             !strconcat("cp.async.", cc, ".shared.global [$dst], [$src], ", cpsize, ";"),
-            [(Intrin Int64Regs:$dst, Int64Regs:$src)]>,
+            [(Intrin i64:$dst, i64:$src)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   // Variant with src_size parameter
   def _32s : NVPTXInst<(outs), (ins Int32Regs:$dst, Int32Regs:$src, Int32Regs:$src_size),
              !strconcat("cp.async.", cc, ".shared.global [$dst], [$src], ", cpsize, ", $src_size;"),
-             [(IntrinS Int32Regs:$dst, Int32Regs:$src, Int32Regs:$src_size)]>,
+             [(IntrinS i32:$dst, i32:$src, i32:$src_size)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _32si: NVPTXInst<(outs), (ins Int32Regs:$dst, Int32Regs:$src, i32imm:$src_size),
              !strconcat("cp.async.", cc, ".shared.global [$dst], [$src], ", cpsize, ", $src_size;"),
-             [(IntrinS Int32Regs:$dst, Int32Regs:$src, imm:$src_size)]>,
+             [(IntrinS i32:$dst, i32:$src, imm:$src_size)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64s : NVPTXInst<(outs), (ins Int64Regs:$dst, Int64Regs:$src, Int32Regs:$src_size),
              !strconcat("cp.async.", cc, ".shared.global [$dst], [$src], ", cpsize, ", $src_size;"),
-             [(IntrinS Int64Regs:$dst, Int64Regs:$src, Int32Regs:$src_size)]>,
+             [(IntrinS i64:$dst, i64:$src, i32:$src_size)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64si: NVPTXInst<(outs), (ins Int64Regs:$dst, Int64Regs:$src, i32imm:$src_size),
              !strconcat("cp.async.", cc, ".shared.global [$dst], [$src], ", cpsize, ", $src_size;"),
-             [(IntrinS Int64Regs:$dst, Int64Regs:$src, imm:$src_size)]>,
+             [(IntrinS i64:$dst, i64:$src, imm:$src_size)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -474,7 +474,7 @@ def CP_ASYNC_COMMIT_GROUP :
 
 def CP_ASYNC_WAIT_GROUP :
   NVPTXInst<(outs), (ins i32imm:$n), "cp.async.wait_group $n;",
-  [(int_nvvm_cp_async_wait_group (i32 timm:$n))]>,
+  [(int_nvvm_cp_async_wait_group timm:$n)]>,
   Requires<[hasPTX<70>, hasSM<80>]>;
 
 def CP_ASYNC_WAIT_ALL :
@@ -490,12 +490,12 @@ def CP_ASYNC_BULK_COMMIT_GROUP :
 
 def CP_ASYNC_BULK_WAIT_GROUP :
   NVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group $n;",
-  [(int_nvvm_cp_async_bulk_wait_group (i32 timm:$n))]>,
+  [(int_nvvm_cp_async_bulk_wait_group timm:$n)]>,
   Requires<[hasPTX<80>, hasSM<90>]>;
 
 def CP_ASYNC_BULK_WAIT_GROUP_READ :
   NVPTXInst<(outs), (ins i32imm:$n), "cp.async.bulk.wait_group.read $n;",
-  [(int_nvvm_cp_async_bulk_wait_group_read (i32 timm:$n))]>,
+  [(int_nvvm_cp_async_bulk_wait_group_read timm:$n)]>,
   Requires<[hasPTX<80>, hasSM<90>]>;
 
 //-----------------------------------
@@ -686,11 +686,11 @@ foreach dim = [1, 2, 3, 4, 5] in {
 multiclass MBARRIER_INIT<string AddrSpace, Intrinsic Intrin> {
   def _32 : NVPTXInst<(outs), (ins Int32Regs:$addr, Int32Regs:$count),
            !strconcat("mbarrier.init", AddrSpace, ".b64 [$addr], $count;"),
-    [(Intrin Int32Regs:$addr, Int32Regs:$count)]>,
+    [(Intrin i32:$addr, i32:$count)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64 : NVPTXInst<(outs), (ins Int64Regs:$addr, Int32Regs:$count),
            !strconcat("mbarrier.init", AddrSpace, ".b64 [$addr], $count;"),
-    [(Intrin Int64Regs:$addr, Int32Regs:$count)]>,
+    [(Intrin i64:$addr, i32:$count)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -701,11 +701,11 @@ defm MBARRIER_INIT_SHARED : MBARRIER_INIT<".shared",
 multiclass MBARRIER_INVAL<string AddrSpace, Intrinsic Intrin> {
   def _32 : NVPTXInst<(outs), (ins Int32Regs:$addr),
            !strconcat("mbarrier.inval", AddrSpace, ".b64 [$addr];"),
-    [(Intrin Int32Regs:$addr)]>,
+    [(Intrin i32:$addr)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64 : NVPTXInst<(outs), (ins Int64Regs:$addr),
            !strconcat("mbarrier.inval", AddrSpace, ".b64 [$addr];"),
-    [(Intrin Int64Regs:$addr)]>,
+    [(Intrin i64:$addr)]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -716,11 +716,11 @@ defm MBARRIER_INVAL_SHARED : MBARRIER_INVAL<".shared",
 multiclass MBARRIER_ARRIVE<string AddrSpace, Intrinsic Intrin> {
   def _32 : NVPTXInst<(outs Int64Regs:$state), (ins Int32Regs:$addr),
            !strconcat("mbarrier.arrive", AddrSpace, ".b64 $state, [$addr];"),
-    [(set Int64Regs:$state, (Intrin Int32Regs:$addr))]>,
+    [(set i64:$state, (Intrin i32:$addr))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64 : NVPTXInst<(outs Int64Regs:$state), (ins Int64Regs:$addr),
            !strconcat("mbarrier.arrive", AddrSpace, ".b64 $state, [$addr];"),
-    [(set Int64Regs:$state, (Intrin Int64Regs:$addr))]>,
+    [(set i64:$state, (Intrin i64:$addr))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -733,13 +733,13 @@ multiclass MBARRIER_ARRIVE_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> {
            (ins Int32Regs:$addr, Int32Regs:$count),
            !strconcat("mbarrier.arrive.noComplete", AddrSpace,
                       ".b64 $state, [$addr], $count;"),
-    [(set Int64Regs:$state, (Intrin Int32Regs:$addr, Int32Regs:$count))]>,
+    [(set i64:$state, (Intrin i32:$addr, i32:$count))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64 : NVPTXInst<(outs Int64Regs:$state),
            (ins Int64Regs:$addr, Int32Regs:$count),
            !strconcat("mbarrier.arrive.noComplete", AddrSpace,
                       ".b64 $state, [$addr], $count;"),
-    [(set Int64Regs:$state, (Intrin Int64Regs:$addr, Int32Regs:$count))]>,
+    [(set i64:$state, (Intrin i64:$addr, i32:$count))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -752,12 +752,12 @@ multiclass MBARRIER_ARRIVE_DROP<string AddrSpace, Intrinsic Intrin> {
   def _32 : NVPTXInst<(outs Int64Regs:$state), (ins Int32Regs:$addr),
            !strconcat("mbarrier.arrive_drop", AddrSpace,
                       ".b64 $state, [$addr];"),
-           [(set Int64Regs:$state, (Intrin Int32Regs:$addr))]>,
+           [(set i64:$state, (Intrin i32:$addr))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64 : NVPTXInst<(outs Int64Regs:$state), (ins Int64Regs:$addr),
            !strconcat("mbarrier.arrive_drop", AddrSpace,
                       ".b64 $state, [$addr];"),
-           [(set Int64Regs:$state, (Intrin Int64Regs:$addr))]>,
+           [(set i64:$state, (Intrin i64:$addr))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -771,13 +771,13 @@ multiclass MBARRIER_ARRIVE_DROP_NOCOMPLETE<string AddrSpace, Intrinsic Intrin> {
            (ins Int32Regs:$addr, Int32Regs:$count),
            !strconcat("mbarrier.arrive_drop.noComplete", AddrSpace,
                       ".b64 $state, [$addr], $count;"),
-           [(set Int64Regs:$state, (Intrin Int32Regs:$addr, Int32Regs:$count))]>,
+           [(set i64:$state, (Intrin i32:$addr, i32:$count))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64 : NVPTXInst<(outs Int64Regs:$state),
            (ins Int64Regs:$addr, Int32Regs:$count),
            !strconcat("mbarrier.arrive_drop.noComplete", AddrSpace,
                       ".b64 $state, [$addr], $count;"),
-           [(set Int64Regs:$state, (Intrin Int64Regs:$addr, Int32Regs:$count))]>,
+           [(set i64:$state, (Intrin i64:$addr, i32:$count))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -790,11 +790,11 @@ defm MBARRIER_ARRIVE_DROP_NOCOMPLETE_SHARED :
 multiclass MBARRIER_TEST_WAIT<string AddrSpace, Intrinsic Intrin> {
   def _32 : NVPTXInst<(outs Int1Regs:$res), (ins Int32Regs:$addr, Int64Regs:$state),
            !strconcat("mbarrier.test_wait", AddrSpace, ".b64 $res, [$addr], $state;"),
-           [(set Int1Regs:$res, (Intrin Int32Regs:$addr, Int64Regs:$state))]>,
+           [(set i1:$res, (Intrin i32:$addr, i64:$state))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
   def _64 : NVPTXInst<(outs Int1Regs:$res), (ins Int64Regs:$addr, Int64Regs:$state),
            !strconcat("mbarrier.test_wait", AddrSpace, ".b64 $res, [$addr], $state;"),
-           [(set Int1Regs:$res, (Intrin Int64Regs:$addr, Int64Regs:$state))]>,
+           [(set i1:$res, (Intrin i64:$addr, i64:$state))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 }
 
@@ -806,7 +806,7 @@ defm MBARRIER_TEST_WAIT_SHARED :
 class MBARRIER_PENDING_COUNT<Intrinsic Intrin> :
            NVPTXInst<(outs Int32Regs:$res), (ins Int64Regs:$state),
            "mbarrier.pending_count.b64 $res, $state;",
-           [(set Int32Regs:$res, (Intrin Int64Regs:$state))]>,
+           [(set i32:$res, (Intrin i64:$state))]>,
     Requires<[hasPTX<70>, hasSM<80>]>;
 
 def MBARRIER_PENDING_COUNT :
@@ -823,29 +823,29 @@ def MBARRIER_PENDING_COUNT :
 // Same story for fmax, fmin.
 
 def : Pat<(int_nvvm_fmin_f immFloat1,
-            (int_nvvm_fmax_f immFloat0, Float32Regs:$a)),
+            (int_nvvm_fmax_f immFloat0, f32:$a)),
           (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
 def : Pat<(int_nvvm_fmin_f immFloat1,
-            (int_nvvm_fmax_f Float32Regs:$a, immFloat0)),
+            (int_nvvm_fmax_f f32:$a, immFloat0)),
           (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
 def : Pat<(int_nvvm_fmin_f
-            (int_nvvm_fmax_f immFloat0, Float32Regs:$a), immFloat1),
+            (int_nvvm_fmax_f immFloat0, f32:$a), immFloat1),
           (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
 def : Pat<(int_nvvm_fmin_f
-            (int_nvvm_fmax_f Float32Regs:$a, immFloat0), immFloat1),
+            (int_nvvm_fmax_f f32:$a, immFloat0), immFloat1),
           (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
 
 def : Pat<(int_nvvm_fmin_d immDouble1,
-            (int_nvvm_fmax_d immDouble0, Float64Regs:$a)),
+            (int_nvvm_fmax_d immDouble0, f64:$a)),
           (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
 def : Pat<(int_nvvm_fmin_d immDouble1,
-            (int_nvvm_fmax_d Float64Regs:$a, immDouble0)),
+            (int_nvvm_fmax_d f64:$a, immDouble0)),
           (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
 def : Pat<(int_nvvm_fmin_d
-            (int_nvvm_fmax_d immDouble0, Float64Regs:$a), immDouble1),
+            (int_nvvm_fmax_d immDouble0, f64:$a), immDouble1),
           (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
 def : Pat<(int_nvvm_fmin_d
-            (int_nvvm_fmax_d Float64Regs:$a, immDouble0), immDouble1),
+            (int_nvvm_fmax_d f64:$a, immDouble0), immDouble1),
           (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
 
 
@@ -890,7 +890,7 @@ def INT_NVVM_NANOSLEEP_I : NVPTXInst<(outs), (ins i32imm:$i), "nanosleep.u32 \t$
                              [(int_nvvm_nanosleep imm:$i)]>,
         Requires<[hasPTX<63>, hasSM<70>]>;
 def INT_NVVM_NANOSLEEP_R : NVPTXInst<(outs), (ins Int32Regs:$i), "nanosleep.u32 \t$i;",
-                             [(int_nvvm_nanosleep Int32Regs:$i)]>,
+                             [(int_nvvm_nanosleep i32:$i)]>,
         Requires<[hasPTX<63>, hasSM<70>]>;
 //
 // Min Max
@@ -1124,16 +1124,16 @@ def INT_NVVM_DIV_RM_D : F_MATH_2<"div.rm.f64 \t$dst, $src0, $src1;",
 def INT_NVVM_DIV_RP_D : F_MATH_2<"div.rp.f64 \t$dst, $src0, $src1;",
   Float64Regs, Float64Regs, Float64Regs, int_nvvm_div_rp_d>;
 
-def : Pat<(int_nvvm_div_full Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_div_full f32:$a, f32:$b),
           (FDIV32rr Float32Regs:$a, Float32Regs:$b)>;
 
-def : Pat<(int_nvvm_div_full Float32Regs:$a, fpimm:$b),
+def : Pat<(int_nvvm_div_full f32:$a, fpimm:$b),
           (FDIV32ri Float32Regs:$a, f32imm:$b)>;
 
-def : Pat<(int_nvvm_div_full_ftz Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_div_full_ftz f32:$a, f32:$b),
           (FDIV32rr_ftz Float32Regs:$a, Float32Regs:$b)>;
 
-def : Pat<(int_nvvm_div_full_ftz Float32Regs:$a, fpimm:$b),
+def : Pat<(int_nvvm_div_full_ftz f32:$a, fpimm:$b),
           (FDIV32ri_ftz Float32Regs:$a, f32imm:$b)>;
 
 //
@@ -1157,18 +1157,18 @@ def INT_NVVM_SAD_ULL : F_MATH_3<"sad.u64 \t$dst, $src0, $src1, $src2;",
 // Floor  Ceil
 //
 
-def : Pat<(int_nvvm_floor_ftz_f Float32Regs:$a),
+def : Pat<(int_nvvm_floor_ftz_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtRMI_FTZ)>;
-def : Pat<(int_nvvm_floor_f Float32Regs:$a),
+def : Pat<(int_nvvm_floor_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtRMI)>;
-def : Pat<(int_nvvm_floor_d Float64Regs:$a),
+def : Pat<(int_nvvm_floor_d f64:$a),
           (CVT_f64_f64 Float64Regs:$a, CvtRMI)>;
 
-def : Pat<(int_nvvm_ceil_ftz_f Float32Regs:$a),
+def : Pat<(int_nvvm_ceil_ftz_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtRPI_FTZ)>;
-def : Pat<(int_nvvm_ceil_f Float32Regs:$a),
+def : Pat<(int_nvvm_ceil_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtRPI)>;
-def : Pat<(int_nvvm_ceil_d Float64Regs:$a),
+def : Pat<(int_nvvm_ceil_d f64:$a),
           (CVT_f64_f64 Float64Regs:$a, CvtRPI)>;
 
 //
@@ -1192,12 +1192,12 @@ def fcopysign_nvptx : SDNode<"NVPTXISD::FCOPYSIGN", SDTFPBinOp>;
 def COPYSIGN_F :
     NVPTXInst<(outs Float32Regs:$dst), (ins Float32Regs:$src0, Float32Regs:$src1),
               "copysign.f32 \t$dst, $src0, $src1;",
-              [(set Float32Regs:$dst, (fcopysign_nvptx Float32Regs:$src1, Float32Regs:$src0))]>;
+              [(set f32:$dst, (fcopysign_nvptx f32:$src1, f32:$src0))]>;
 
 def COPYSIGN_D :
     NVPTXInst<(outs Float64Regs:$dst), (ins Float64Regs:$src0, Float64Regs:$src1),
               "copysign.f64 \t$dst, $src0, $src1;",
-              [(set Float64Regs:$dst, (fcopysign_nvptx Float64Regs:$src1, Float64Regs:$src0))]>;
+              [(set f64:$dst, (fcopysign_nvptx f64:$src1, f64:$src0))]>;
 
 //
 // Abs, Neg bf16, bf16x2
@@ -1216,33 +1216,33 @@ def INT_NVVM_NEG_BF16X2 : F_MATH_1<"neg.bf16x2 \t$dst, $src0;", Int32Regs,
 // Round
 //
 
-def : Pat<(int_nvvm_round_ftz_f Float32Regs:$a),
+def : Pat<(int_nvvm_round_ftz_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtRNI_FTZ)>;
-def : Pat<(int_nvvm_round_f Float32Regs:$a),
+def : Pat<(int_nvvm_round_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtRNI)>;
-def : Pat<(int_nvvm_round_d Float64Regs:$a),
+def : Pat<(int_nvvm_round_d f64:$a),
           (CVT_f64_f64 Float64Regs:$a, CvtRNI)>;
 
 //
 // Trunc
 //
 
-def : Pat<(int_nvvm_trunc_ftz_f Float32Regs:$a),
+def : Pat<(int_nvvm_trunc_ftz_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtRZI_FTZ)>;
-def : Pat<(int_nvvm_trunc_f Float32Regs:$a),
+def : Pat<(int_nvvm_trunc_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(int_nvvm_trunc_d Float64Regs:$a),
+def : Pat<(int_nvvm_trunc_d f64:$a),
           (CVT_f64_f64 Float64Regs:$a, CvtRZI)>;
 
 //
 // Saturate
 //
 
-def : Pat<(int_nvvm_saturate_ftz_f Float32Regs:$a),
+def : Pat<(int_nvvm_saturate_ftz_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtSAT_FTZ)>;
-def : Pat<(int_nvvm_saturate_f Float32Regs:$a),
+def : Pat<(int_nvvm_saturate_f f32:$a),
           (CVT_f32_f32 Float32Regs:$a, CvtSAT)>;
-def : Pat<(int_nvvm_saturate_d Float64Regs:$a),
+def : Pat<(int_nvvm_saturate_d f64:$a),
           (CVT_f64_f64 Float64Regs:$a, CvtSAT)>;
 
 //
@@ -1429,13 +1429,13 @@ def INT_NVVM_SQRT_RP_D : F_MATH_1<"sqrt.rp.f64 \t$dst, $src0;", Float64Regs,
   Float64Regs, int_nvvm_sqrt_rp_d>;
 
 // nvvm_sqrt intrinsic
-def : Pat<(int_nvvm_sqrt_f Float32Regs:$a),
+def : Pat<(int_nvvm_sqrt_f f32:$a),
           (INT_NVVM_SQRT_RN_FTZ_F Float32Regs:$a)>, Requires<[doF32FTZ, do_SQRTF32_RN]>;
-def : Pat<(int_nvvm_sqrt_f Float32Regs:$a),
+def : Pat<(int_nvvm_sqrt_f f32:$a),
           (INT_NVVM_SQRT_RN_F Float32Regs:$a)>, Requires<[do_SQRTF32_RN]>;
-def : Pat<(int_nvvm_sqrt_f Float32Regs:$a),
+def : Pat<(int_nvvm_sqrt_f f32:$a),
           (INT_NVVM_SQRT_APPROX_FTZ_F Float32Regs:$a)>, Requires<[doF32FTZ]>;
-def : Pat<(int_nvvm_sqrt_f Float32Regs:$a),
+def : Pat<(int_nvvm_sqrt_f f32:$a),
           (INT_NVVM_SQRT_APPROX_F Float32Regs:$a)>;
 
 //
@@ -1455,24 +1455,24 @@ def INT_NVVM_RSQRT_APPROX_D : F_MATH_1<"rsqrt.approx.f64 \t$dst, $src0;",
   Float64Regs, Float64Regs, int_nvvm_rsqrt_approx_d>;
 
 // 1.0f / sqrt_approx -> rsqrt_approx
-def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_approx_f Float32Regs:$a)),
+def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_approx_f f32:$a)),
          (INT_NVVM_RSQRT_APPROX_F Float32Regs:$a)>,
          Requires<[doRsqrtOpt]>;
-def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_approx_ftz_f Float32Regs:$a)),
+def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_approx_ftz_f f32:$a)),
          (INT_NVVM_RSQRT_APPROX_FTZ_F Float32Regs:$a)>,
          Requires<[doRsqrtOpt]>;
 // same for int_nvvm_sqrt_f when non-precision sqrt is requested
-def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_f Float32Regs:$a)),
+def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_f f32:$a)),
          (INT_NVVM_RSQRT_APPROX_F Float32Regs:$a)>,
          Requires<[doRsqrtOpt, do_SQRTF32_APPROX, doNoF32FTZ]>;
-def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_f Float32Regs:$a)),
+def: Pat<(fdiv FloatConst1, (int_nvvm_sqrt_f f32:$a)),
          (INT_NVVM_RSQRT_APPROX_FTZ_F Float32Regs:$a)>,
          Requires<[doRsqrtOpt, do_SQRTF32_APPROX, doF32FTZ]>;
 
-def: Pat<(fdiv FloatConst1, (fsqrt Float32Regs:$a)),
+def: Pat<(fdiv FloatConst1, (fsqrt f32:$a)),
          (INT_NVVM_RSQRT_APPROX_F Float32Regs:$a)>,
          Requires<[doRsqrtOpt, do_SQRTF32_APPROX, doNoF32FTZ]>;
-def: Pat<(fdiv FloatConst1, (fsqrt Float32Regs:$a)),
+def: Pat<(fdiv FloatConst1, (fsqrt f32:$a)),
          (INT_NVVM_RSQRT_APPROX_FTZ_F Float32Regs:$a)>,
          Requires<[doRsqrtOpt, do_SQRTF32_APPROX, doF32FTZ]>;
 //
@@ -1515,12 +1515,12 @@ foreach t = [I32RT, I64RT] in {
     def BFIND_ # sign # t.Size
       : NVPTXInst<(outs Int32Regs:$dst), (ins t.RC:$src),
                   "bfind." # sign # t.Size # " \t$dst, $src;",
-                  [(set (i32 Int32Regs:$dst), (flo_intrin (t.Ty t.RC:$src), 0))]>;
+                  [(set i32:$dst, (flo_intrin t.Ty:$src, 0))]>;
 
     def BFIND_SHIFTAMT_ # sign # t.Size
       : NVPTXInst<(outs Int32Regs:$dst), (ins t.RC:$src),
                   "bfind.shiftamt." # sign # t.Size # " \t$dst, $src;",
-                  [(set (i32 Int32Regs:$dst), (flo_intrin (t.Ty t.RC:$src), -1))]>;
+                  [(set i32:$dst, (flo_intrin t.Ty:$src, -1))]>;
   }
 }
 
@@ -1528,142 +1528,142 @@ foreach t = [I32RT, I64RT] in {
 // Convert
 //
 
-def : Pat<(int_nvvm_d2f_rn_ftz Float64Regs:$a),
+def : Pat<(int_nvvm_d2f_rn_ftz f64:$a),
           (CVT_f32_f64 Float64Regs:$a, CvtRN_FTZ)>;
-def : Pat<(int_nvvm_d2f_rn Float64Regs:$a),
+def : Pat<(int_nvvm_d2f_rn f64:$a),
           (CVT_f32_f64 Float64Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_d2f_rz_ftz Float64Regs:$a),
+def : Pat<(int_nvvm_d2f_rz_ftz f64:$a),
           (CVT_f32_f64 Float64Regs:$a, CvtRZ_FTZ)>;
-def : Pat<(int_nvvm_d2f_rz Float64Regs:$a),
+def : Pat<(int_nvvm_d2f_rz f64:$a),
           (CVT_f32_f64 Float64Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_d2f_rm_ftz Float64Regs:$a),
+def : Pat<(int_nvvm_d2f_rm_ftz f64:$a),
           (CVT_f32_f64 Float64Regs:$a, CvtRM_FTZ)>;
-def : Pat<(int_nvvm_d2f_rm Float64Regs:$a),
+def : Pat<(int_nvvm_d2f_rm f64:$a),
           (CVT_f32_f64 Float64Regs:$a, CvtRM)>;
-def : Pat<(int_nvvm_d2f_rp_ftz Float64Regs:$a),
+def : Pat<(int_nvvm_d2f_rp_ftz f64:$a),
           (CVT_f32_f64 Float64Regs:$a, CvtRP_FTZ)>;
-def : Pat<(int_nvvm_d2f_rp Float64Regs:$a),
+def : Pat<(int_nvvm_d2f_rp f64:$a),
           (CVT_f32_f64 Float64Regs:$a, CvtRP)>;
 
-def : Pat<(int_nvvm_d2i_rn Float64Regs:$a),
+def : Pat<(int_nvvm_d2i_rn f64:$a),
           (CVT_s32_f64 Float64Regs:$a, CvtRNI)>;
-def : Pat<(int_nvvm_d2i_rz Float64Regs:$a),
+def : Pat<(int_nvvm_d2i_rz f64:$a),
           (CVT_s32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(int_nvvm_d2i_rm Float64Regs:$a),
+def : Pat<(int_nvvm_d2i_rm f64:$a),
           (CVT_s32_f64 Float64Regs:$a, CvtRMI)>;
-def : Pat<(int_nvvm_d2i_rp Float64Regs:$a),
+def : Pat<(int_nvvm_d2i_rp f64:$a),
           (CVT_s32_f64 Float64Regs:$a, CvtRPI)>;
 
-def : Pat<(int_nvvm_d2ui_rn Float64Regs:$a),
+def : Pat<(int_nvvm_d2ui_rn f64:$a),
           (CVT_u32_f64 Float64Regs:$a, CvtRNI)>;
-def : Pat<(int_nvvm_d2ui_rz Float64Regs:$a),
+def : Pat<(int_nvvm_d2ui_rz f64:$a),
           (CVT_u32_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(int_nvvm_d2ui_rm Float64Regs:$a),
+def : Pat<(int_nvvm_d2ui_rm f64:$a),
           (CVT_u32_f64 Float64Regs:$a, CvtRMI)>;
-def : Pat<(int_nvvm_d2ui_rp Float64Regs:$a),
+def : Pat<(int_nvvm_d2ui_rp f64:$a),
           (CVT_u32_f64 Float64Regs:$a, CvtRPI)>;
 
-def : Pat<(int_nvvm_i2d_rn Int32Regs:$a),
+def : Pat<(int_nvvm_i2d_rn i32:$a),
           (CVT_f64_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_i2d_rz Int32Regs:$a),
+def : Pat<(int_nvvm_i2d_rz i32:$a),
           (CVT_f64_s32 Int32Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_i2d_rm Int32Regs:$a),
+def : Pat<(int_nvvm_i2d_rm i32:$a),
           (CVT_f64_s32 Int32Regs:$a, CvtRM)>;
-def : Pat<(int_nvvm_i2d_rp Int32Regs:$a),
+def : Pat<(int_nvvm_i2d_rp i32:$a),
           (CVT_f64_s32 Int32Regs:$a, CvtRP)>;
 
-def : Pat<(int_nvvm_ui2d_rn Int32Regs:$a),
+def : Pat<(int_nvvm_ui2d_rn i32:$a),
           (CVT_f64_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_ui2d_rz Int32Regs:$a),
+def : Pat<(int_nvvm_ui2d_rz i32:$a),
           (CVT_f64_u32 Int32Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_ui2d_rm Int32Regs:$a),
+def : Pat<(int_nvvm_ui2d_rm i32:$a),
           (CVT_f64_u32 Int32Regs:$a, CvtRM)>;
-def : Pat<(int_nvvm_ui2d_rp Int32Regs:$a),
+def : Pat<(int_nvvm_ui2d_rp i32:$a),
           (CVT_f64_u32 Int32Regs:$a, CvtRP)>;
 
-def : Pat<(int_nvvm_f2i_rn_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2i_rn_ftz f32:$a),
           (CVT_s32_f32 Float32Regs:$a, CvtRNI_FTZ)>;
-def : Pat<(int_nvvm_f2i_rn Float32Regs:$a),
+def : Pat<(int_nvvm_f2i_rn f32:$a),
           (CVT_s32_f32 Float32Regs:$a, CvtRNI)>;
-def : Pat<(int_nvvm_f2i_rz_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2i_rz_ftz f32:$a),
           (CVT_s32_f32 Float32Regs:$a, CvtRZI_FTZ)>;
-def : Pat<(int_nvvm_f2i_rz Float32Regs:$a),
+def : Pat<(int_nvvm_f2i_rz f32:$a),
           (CVT_s32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(int_nvvm_f2i_rm_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2i_rm_ftz f32:$a),
           (CVT_s32_f32 Float32Regs:$a, CvtRMI_FTZ)>;
-def : Pat<(int_nvvm_f2i_rm Float32Regs:$a),
+def : Pat<(int_nvvm_f2i_rm f32:$a),
           (CVT_s32_f32 Float32Regs:$a, CvtRMI)>;
-def : Pat<(int_nvvm_f2i_rp_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2i_rp_ftz f32:$a),
           (CVT_s32_f32 Float32Regs:$a, CvtRPI_FTZ)>;
-def : Pat<(int_nvvm_f2i_rp Float32Regs:$a),
+def : Pat<(int_nvvm_f2i_rp f32:$a),
           (CVT_s32_f32 Float32Regs:$a, CvtRPI)>;
 
-def : Pat<(int_nvvm_f2ui_rn_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ui_rn_ftz f32:$a),
           (CVT_u32_f32 Float32Regs:$a, CvtRNI_FTZ)>;
-def : Pat<(int_nvvm_f2ui_rn Float32Regs:$a),
+def : Pat<(int_nvvm_f2ui_rn f32:$a),
           (CVT_u32_f32 Float32Regs:$a, CvtRNI)>;
-def : Pat<(int_nvvm_f2ui_rz_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ui_rz_ftz f32:$a),
           (CVT_u32_f32 Float32Regs:$a, CvtRZI_FTZ)>;
-def : Pat<(int_nvvm_f2ui_rz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ui_rz f32:$a),
           (CVT_u32_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(int_nvvm_f2ui_rm_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ui_rm_ftz f32:$a),
           (CVT_u32_f32 Float32Regs:$a, CvtRMI_FTZ)>;
-def : Pat<(int_nvvm_f2ui_rm Float32Regs:$a),
+def : Pat<(int_nvvm_f2ui_rm f32:$a),
           (CVT_u32_f32 Float32Regs:$a, CvtRMI)>;
-def : Pat<(int_nvvm_f2ui_rp_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ui_rp_ftz f32:$a),
           (CVT_u32_f32 Float32Regs:$a, CvtRPI_FTZ)>;
-def : Pat<(int_nvvm_f2ui_rp Float32Regs:$a),
+def : Pat<(int_nvvm_f2ui_rp f32:$a),
           (CVT_u32_f32 Float32Regs:$a, CvtRPI)>;
 
-def : Pat<(int_nvvm_i2f_rn Int32Regs:$a),
+def : Pat<(int_nvvm_i2f_rn i32:$a),
           (CVT_f32_s32 Int32Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_i2f_rz Int32Regs:$a),
+def : Pat<(int_nvvm_i2f_rz i32:$a),
           (CVT_f32_s32 Int32Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_i2f_rm Int32Regs:$a),
+def : Pat<(int_nvvm_i2f_rm i32:$a),
           (CVT_f32_s32 Int32Regs:$a, CvtRM)>;
-def : Pat<(int_nvvm_i2f_rp Int32Regs:$a),
+def : Pat<(int_nvvm_i2f_rp i32:$a),
           (CVT_f32_s32 Int32Regs:$a, CvtRP)>;
 
-def : Pat<(int_nvvm_ui2f_rn Int32Regs:$a),
+def : Pat<(int_nvvm_ui2f_rn i32:$a),
           (CVT_f32_u32 Int32Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_ui2f_rz Int32Regs:$a),
+def : Pat<(int_nvvm_ui2f_rz i32:$a),
           (CVT_f32_u32 Int32Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_ui2f_rm Int32Regs:$a),
+def : Pat<(int_nvvm_ui2f_rm i32:$a),
           (CVT_f32_u32 Int32Regs:$a, CvtRM)>;
-def : Pat<(int_nvvm_ui2f_rp Int32Regs:$a),
+def : Pat<(int_nvvm_ui2f_rp i32:$a),
           (CVT_f32_u32 Int32Regs:$a, CvtRP)>;
 
-def : Pat<(int_nvvm_ff2bf16x2_rn Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff2bf16x2_rn f32:$a, f32:$b),
           (CVT_bf16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN)>;
-def : Pat<(int_nvvm_ff2bf16x2_rn_relu Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff2bf16x2_rn_relu f32:$a, f32:$b),
           (CVT_bf16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN_RELU)>;
-def : Pat<(int_nvvm_ff2bf16x2_rz Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff2bf16x2_rz f32:$a, f32:$b),
           (CVT_bf16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRZ)>;
-def : Pat<(int_nvvm_ff2bf16x2_rz_relu Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff2bf16x2_rz_relu f32:$a, f32:$b),
           (CVT_bf16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRZ_RELU)>;
 
-def : Pat<(int_nvvm_ff2f16x2_rn Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff2f16x2_rn f32:$a, f32:$b),
           (CVT_f16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN)>;
-def : Pat<(int_nvvm_ff2f16x2_rn_relu Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff2f16x2_rn_relu f32:$a, f32:$b),
           (CVT_f16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN_RELU)>;
-def : Pat<(int_nvvm_ff2f16x2_rz Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff2f16x2_rz f32:$a, f32:$b),
           (CVT_f16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRZ)>;
-def : Pat<(int_nvvm_ff2f16x2_rz_relu Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff2f16x2_rz_relu f32:$a, f32:$b),
           (CVT_f16x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRZ_RELU)>;
 
-def : Pat<(int_nvvm_f2bf16_rn Float32Regs:$a),
+def : Pat<(int_nvvm_f2bf16_rn f32:$a),
           (CVT_bf16_f32 Float32Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_f2bf16_rn_relu Float32Regs:$a),
+def : Pat<(int_nvvm_f2bf16_rn_relu f32:$a),
           (CVT_bf16_f32 Float32Regs:$a, CvtRN_RELU)>;
-def : Pat<(int_nvvm_f2bf16_rz Float32Regs:$a),
+def : Pat<(int_nvvm_f2bf16_rz f32:$a),
           (CVT_bf16_f32 Float32Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_f2bf16_rz_relu Float32Regs:$a),
+def : Pat<(int_nvvm_f2bf16_rz_relu f32:$a),
           (CVT_bf16_f32 Float32Regs:$a, CvtRZ_RELU)>;
 
 def CVT_tf32_f32 :
    NVPTXInst<(outs Int32Regs:$dest), (ins Float32Regs:$a),
                    "cvt.rna.tf32.f32 \t$dest, $a;",
-       [(set Int32Regs:$dest, (int_nvvm_f2tf32_rna Float32Regs:$a))]>;
+       [(set i32:$dest, (int_nvvm_f2tf32_rna f32:$a))]>;
 
 def INT_NVVM_LOHI_I2D : F_MATH_2<"mov.b64 \t$dst, {{$src0, $src1}};",
   Float64Regs, Int32Regs, Int32Regs, int_nvvm_lohi_i2d>;
@@ -1681,107 +1681,107 @@ def INT_NVVM_D2I_HI : F_MATH_1<
              "}}"),
   Int32Regs, Float64Regs, int_nvvm_d2i_hi>;
 
-def : Pat<(int_nvvm_f2ll_rn_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ll_rn_ftz f32:$a),
           (CVT_s64_f32 Float32Regs:$a, CvtRNI_FTZ)>;
-def : Pat<(int_nvvm_f2ll_rn Float32Regs:$a),
+def : Pat<(int_nvvm_f2ll_rn f32:$a),
           (CVT_s64_f32 Float32Regs:$a, CvtRNI)>;
-def : Pat<(int_nvvm_f2ll_rz_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ll_rz_ftz f32:$a),
           (CVT_s64_f32 Float32Regs:$a, CvtRZI_FTZ)>;
-def : Pat<(int_nvvm_f2ll_rz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ll_rz f32:$a),
           (CVT_s64_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(int_nvvm_f2ll_rm_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ll_rm_ftz f32:$a),
           (CVT_s64_f32 Float32Regs:$a, CvtRMI_FTZ)>;
-def : Pat<(int_nvvm_f2ll_rm Float32Regs:$a),
+def : Pat<(int_nvvm_f2ll_rm f32:$a),
           (CVT_s64_f32 Float32Regs:$a, CvtRMI)>;
-def : Pat<(int_nvvm_f2ll_rp_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ll_rp_ftz f32:$a),
           (CVT_s64_f32 Float32Regs:$a, CvtRPI_FTZ)>;
-def : Pat<(int_nvvm_f2ll_rp Float32Regs:$a),
+def : Pat<(int_nvvm_f2ll_rp f32:$a),
           (CVT_s64_f32 Float32Regs:$a, CvtRPI)>;
 
-def : Pat<(int_nvvm_f2ull_rn_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ull_rn_ftz f32:$a),
           (CVT_u64_f32 Float32Regs:$a, CvtRNI_FTZ)>;
-def : Pat<(int_nvvm_f2ull_rn Float32Regs:$a),
+def : Pat<(int_nvvm_f2ull_rn f32:$a),
           (CVT_u64_f32 Float32Regs:$a, CvtRNI)>;
-def : Pat<(int_nvvm_f2ull_rz_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ull_rz_ftz f32:$a),
           (CVT_u64_f32 Float32Regs:$a, CvtRZI_FTZ)>;
-def : Pat<(int_nvvm_f2ull_rz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ull_rz f32:$a),
           (CVT_u64_f32 Float32Regs:$a, CvtRZI)>;
-def : Pat<(int_nvvm_f2ull_rm_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ull_rm_ftz f32:$a),
           (CVT_u64_f32 Float32Regs:$a, CvtRMI_FTZ)>;
-def : Pat<(int_nvvm_f2ull_rm Float32Regs:$a),
+def : Pat<(int_nvvm_f2ull_rm f32:$a),
           (CVT_u64_f32 Float32Regs:$a, CvtRMI)>;
-def : Pat<(int_nvvm_f2ull_rp_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2ull_rp_ftz f32:$a),
           (CVT_u64_f32 Float32Regs:$a, CvtRPI_FTZ)>;
-def : Pat<(int_nvvm_f2ull_rp Float32Regs:$a),
+def : Pat<(int_nvvm_f2ull_rp f32:$a),
           (CVT_u64_f32 Float32Regs:$a, CvtRPI)>;
 
-def : Pat<(int_nvvm_d2ll_rn Float64Regs:$a),
+def : Pat<(int_nvvm_d2ll_rn f64:$a),
           (CVT_s64_f64 Float64Regs:$a, CvtRNI)>;
-def : Pat<(int_nvvm_d2ll_rz Float64Regs:$a),
+def : Pat<(int_nvvm_d2ll_rz f64:$a),
           (CVT_s64_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(int_nvvm_d2ll_rm Float64Regs:$a),
+def : Pat<(int_nvvm_d2ll_rm f64:$a),
           (CVT_s64_f64 Float64Regs:$a, CvtRMI)>;
-def : Pat<(int_nvvm_d2ll_rp Float64Regs:$a),
+def : Pat<(int_nvvm_d2ll_rp f64:$a),
           (CVT_s64_f64 Float64Regs:$a, CvtRPI)>;
 
-def : Pat<(int_nvvm_d2ull_rn Float64Regs:$a),
+def : Pat<(int_nvvm_d2ull_rn f64:$a),
           (CVT_u64_f64 Float64Regs:$a, CvtRNI)>;
-def : Pat<(int_nvvm_d2ull_rz Float64Regs:$a),
+def : Pat<(int_nvvm_d2ull_rz f64:$a),
           (CVT_u64_f64 Float64Regs:$a, CvtRZI)>;
-def : Pat<(int_nvvm_d2ull_rm Float64Regs:$a),
+def : Pat<(int_nvvm_d2ull_rm f64:$a),
           (CVT_u64_f64 Float64Regs:$a, CvtRMI)>;
-def : Pat<(int_nvvm_d2ull_rp Float64Regs:$a),
+def : Pat<(int_nvvm_d2ull_rp f64:$a),
           (CVT_u64_f64 Float64Regs:$a, CvtRPI)>;
 
-def : Pat<(int_nvvm_ll2f_rn Int64Regs:$a),
+def : Pat<(int_nvvm_ll2f_rn i64:$a),
           (CVT_f32_s64 Int64Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_ll2f_rz Int64Regs:$a),
+def : Pat<(int_nvvm_ll2f_rz i64:$a),
           (CVT_f32_s64 Int64Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_ll2f_rm Int64Regs:$a),
+def : Pat<(int_nvvm_ll2f_rm i64:$a),
           (CVT_f32_s64 Int64Regs:$a, CvtRM)>;
-def : Pat<(int_nvvm_ll2f_rp Int64Regs:$a),
+def : Pat<(int_nvvm_ll2f_rp i64:$a),
           (CVT_f32_s64 Int64Regs:$a, CvtRP)>;
 
-def : Pat<(int_nvvm_ull2f_rn Int64Regs:$a),
+def : Pat<(int_nvvm_ull2f_rn i64:$a),
           (CVT_f32_u64 Int64Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_ull2f_rz Int64Regs:$a),
+def : Pat<(int_nvvm_ull2f_rz i64:$a),
           (CVT_f32_u64 Int64Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_ull2f_rm Int64Regs:$a),
+def : Pat<(int_nvvm_ull2f_rm i64:$a),
           (CVT_f32_u64 Int64Regs:$a, CvtRM)>;
-def : Pat<(int_nvvm_ull2f_rp Int64Regs:$a),
+def : Pat<(int_nvvm_ull2f_rp i64:$a),
           (CVT_f32_u64 Int64Regs:$a, CvtRP)>;
 
-def : Pat<(int_nvvm_ll2d_rn Int64Regs:$a),
+def : Pat<(int_nvvm_ll2d_rn i64:$a),
           (CVT_f64_s64 Int64Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_ll2d_rz Int64Regs:$a),
+def : Pat<(int_nvvm_ll2d_rz i64:$a),
           (CVT_f64_s64 Int64Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_ll2d_rm Int64Regs:$a),
+def : Pat<(int_nvvm_ll2d_rm i64:$a),
           (CVT_f64_s64 Int64Regs:$a, CvtRM)>;
-def : Pat<(int_nvvm_ll2d_rp Int64Regs:$a),
+def : Pat<(int_nvvm_ll2d_rp i64:$a),
           (CVT_f64_s64 Int64Regs:$a, CvtRP)>;
 
-def : Pat<(int_nvvm_ull2d_rn Int64Regs:$a),
+def : Pat<(int_nvvm_ull2d_rn i64:$a),
           (CVT_f64_u64 Int64Regs:$a, CvtRN)>;
-def : Pat<(int_nvvm_ull2d_rz Int64Regs:$a),
+def : Pat<(int_nvvm_ull2d_rz i64:$a),
           (CVT_f64_u64 Int64Regs:$a, CvtRZ)>;
-def : Pat<(int_nvvm_ull2d_rm Int64Regs:$a),
+def : Pat<(int_nvvm_ull2d_rm i64:$a),
           (CVT_f64_u64 Int64Regs:$a, CvtRM)>;
-def : Pat<(int_nvvm_ull2d_rp Int64Regs:$a),
+def : Pat<(int_nvvm_ull2d_rp i64:$a),
           (CVT_f64_u64 Int64Regs:$a, CvtRP)>;
 
 
-def : Pat<(int_nvvm_f2h_rn_ftz Float32Regs:$a),
+def : Pat<(int_nvvm_f2h_rn_ftz f32:$a),
           (CVT_f16_f32 Float32Regs:$a, CvtRN_FTZ)>;
-def : Pat<(int_nvvm_f2h_rn Float32Regs:$a),
+def : Pat<(int_nvvm_f2h_rn f32:$a),
           (CVT_f16_f32 Float32Regs:$a, CvtRN)>;
 
-def : Pat<(int_nvvm_ff_to_e4m3x2_rn Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff_to_e4m3x2_rn f32:$a, f32:$b),
           (CVT_e4m3x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN)>;
-def : Pat<(int_nvvm_ff_to_e4m3x2_rn_relu Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff_to_e4m3x2_rn_relu f32:$a, f32:$b),
           (CVT_e4m3x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN_RELU)>;
-def : Pat<(int_nvvm_ff_to_e5m2x2_rn Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff_to_e5m2x2_rn f32:$a, f32:$b),
           (CVT_e5m2x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN)>;
-def : Pat<(int_nvvm_ff_to_e5m2x2_rn_relu Float32Regs:$a, Float32Regs:$b),
+def : Pat<(int_nvvm_ff_to_e5m2x2_rn_relu f32:$a, f32:$b),
           (CVT_e5m2x2_f32 Float32Regs:$a, Float32Regs:$b, CvtRN_RELU)>;
 
 def : Pat<(int_nvvm_f16x2_to_e4m3x2_rn Int32Regs:$a),
@@ -1809,19 +1809,19 @@ def : Pat<(int_nvvm_e5m2x2_to_f16x2_rn_relu Int16Regs:$a),
 class INT_FNS_MBO<dag ins, dag Operands>
   : NVPTXInst<(outs Int32Regs:$dst), ins,
                "fns.b32 \t$dst, $mask, $base, $offset;",
-               [(set Int32Regs:$dst, Operands )]>,
+               [(set i32:$dst, Operands)]>,
     Requires<[hasPTX<60>, hasSM<30>]>;
 
 def INT_FNS_rrr : INT_FNS_MBO<(ins Int32Regs:$mask, Int32Regs:$base, Int32Regs:$offset),
-                     (int_nvvm_fns Int32Regs:$mask, Int32Regs:$base, Int32Regs:$offset)>;
+                     (int_nvvm_fns i32:$mask, i32:$base, i32:$offset)>;
 def INT_FNS_rri : INT_FNS_MBO<(ins Int32Regs:$mask, Int32Regs:$base,    i32imm:$offset),
-                     (int_nvvm_fns Int32Regs:$mask, Int32Regs:$base,       imm:$offset)>;
+                     (int_nvvm_fns i32:$mask, i32:$base,       imm:$offset)>;
 def INT_FNS_rir : INT_FNS_MBO<(ins Int32Regs:$mask,    i32imm:$base, Int32Regs:$offset),
-                     (int_nvvm_fns Int32Regs:$mask,       imm:$base, Int32Regs:$offset)>;
+                     (int_nvvm_fns i32:$mask,       imm:$base, i32:$offset)>;
 def INT_FNS_rii : INT_FNS_MBO<(ins Int32Regs:$mask,    i32imm:$base,    i32imm:$offset),
-                     (int_nvvm_fns Int32Regs:$mask,       imm:$base,       imm:$offset)>;
+                     (int_nvvm_fns i32:$mask,       imm:$base,       imm:$offset)>;
 def INT_FNS_irr : INT_FNS_MBO<(ins    i32imm:$mask, Int32Regs:$base, Int32Regs:$offset),
-                     (int_nvvm_fns       imm:$mask, Int32Regs:$base, Int32Regs:$offset)>;
+                     (int_nvvm_fns       imm:$mask, i32:$base, i32:$offset)>;
 def INT_FNS_iri : INT_FNS_MBO<(ins    i32imm:$mask, Int32Regs:$base,    i32imm:$offset),
                      (int_nvvm_fns       imm:$mask, Int32Regs:$base,       imm:$offset)>;
 def INT_FNS_iir : INT_FNS_MBO<(ins    i32imm:$mask,    i32imm:$base, Int32Regs:$offset),
@@ -2363,7 +2363,7 @@ class ATOM23_impl<string AsmStr, ValueType regT, NVPTXRegClass regclass, list<Pr
                   dag ins, dag Operands>
       : NVPTXInst<(outs regclass:$result), ins,
                   AsmStr,
-                  [(set (regT regclass:$result), Operands)]>,
+                  [(set regT:$result, Operands)]>,
         Requires<Preds>;
 
 // Define instruction variants for all addressing modes.
@@ -2374,26 +2374,26 @@ multiclass ATOM2P_impl<string AsmStr,  Intrinsic Intr,
   let AddedComplexity = 1 in {
     def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int16Regs:$src, regclass:$b),
-                      (Intr (i16 Int16Regs:$src), (regT regclass:$b))>;
+                      (Intr i16:$src, regT:$b)>;
     def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int32Regs:$src, regclass:$b),
-                      (Intr (i32 Int32Regs:$src), (regT regclass:$b))>;
+                      (Intr i32:$src, regT:$b)>;
     def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int64Regs:$src, regclass:$b),
-                      (Intr (i64 Int64Regs:$src), (regT regclass:$b))>;
+                      (Intr i64:$src, regT:$b)>;
   }
   // tablegen can't infer argument types from Intrinsic (though it can
   // from Instruction) so we have to enforce specific type on
   // immediates via explicit cast to ImmTy.
   def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                     (ins Int16Regs:$src, ImmType:$b),
-                    (Intr (i16 Int16Regs:$src), (ImmTy Imm:$b))>;
+                    (Intr i16:$src, (ImmTy Imm:$b))>;
   def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                     (ins Int32Regs:$src, ImmType:$b),
-                    (Intr (i32 Int32Regs:$src), (ImmTy Imm:$b))>;
+                    (Intr i32:$src, (ImmTy Imm:$b))>;
   def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                     (ins Int64Regs:$src, ImmType:$b),
-                    (Intr (i64 Int64Regs:$src), (ImmTy Imm:$b))>;
+                    (Intr i64:$src, (ImmTy Imm:$b))>;
 }
 
 multiclass ATOM3P_impl<string AsmStr,  Intrinsic Intr,
@@ -2404,31 +2404,31 @@ multiclass ATOM3P_impl<string AsmStr,  Intrinsic Intr,
   let AddedComplexity = 2 in {
     def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int32Regs:$src, regclass:$b, regclass:$c),
-                      (Intr (i32 Int32Regs:$src), (regT regclass:$b), (regT regclass:$c))>;
+                      (Intr i32:$src, regT:$b, regT:$c)>;
     def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int64Regs:$src, regclass:$b, regclass:$c),
-                      (Intr (i64 Int64Regs:$src), (regT regclass:$b), (regT regclass:$c))>;
+                      (Intr i64:$src, regT:$b, regT:$c)>;
   }
   let AddedComplexity = 1 in {
     def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int32Regs:$src, ImmType:$b, regclass:$c),
-                      (Intr (i32 Int32Regs:$src), (ImmTy Imm:$b), (regT regclass:$c))>;
+                      (Intr i32:$src, (ImmTy Imm:$b), regT:$c)>;
     def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int64Regs:$src, ImmType:$b, regclass:$c),
-                      (Intr (i64 Int64Regs:$src), (ImmTy Imm:$b), (regT regclass:$c))>;
+                      (Intr i64:$src, (ImmTy Imm:$b), regT:$c)>;
     def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int32Regs:$src, regclass:$b, ImmType:$c),
-                      (Intr (i32 Int32Regs:$src), (regT regclass:$b), (ImmTy Imm:$c))>;
+                      (Intr i32:$src, regT:$b, (ImmTy Imm:$c))>;
     def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                       (ins Int64Regs:$src, regclass:$b, ImmType:$c),
-                      (Intr (i64 Int64Regs:$src), (regT regclass:$b), (ImmTy Imm:$c))>;
+                      (Intr i64:$src, regT:$b, (ImmTy Imm:$c))>;
   }
   def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                     (ins Int32Regs:$src, ImmType:$b, ImmType:$c),
-                    (Intr (i32 Int32Regs:$src), (ImmTy Imm:$b), (ImmTy Imm:$c))>;
+                    (Intr i32:$src, (ImmTy Imm:$b), (ImmTy Imm:$c))>;
   def : ATOM23_impl<AsmStr, regT, regclass, Preds,
                     (ins Int64Regs:$src, ImmType:$b, ImmType:$c),
-                    (Intr (i64 Int64Regs:$src), (ImmTy Imm:$b), (ImmTy Imm:$c))>;
+                    (Intr i64:$src, (ImmTy Imm:$b), (ImmTy Imm:$c))>;
 }
 
 // Constructs intrinsic name and instruction asm strings.
@@ -2795,48 +2795,48 @@ defm cvta_to_const  : G_TO_NG<"const">;
 // nvvm.ptr.param.to.gen
 defm cvta_param : NG_TO_G<"param">;
 
-def : Pat<(int_nvvm_ptr_param_to_gen Int32Regs:$src),
+def : Pat<(int_nvvm_ptr_param_to_gen i32:$src),
           (cvta_param Int32Regs:$src)>;
 
-def : Pat<(int_nvvm_ptr_param_to_gen Int64Regs:$src),
+def : Pat<(int_nvvm_ptr_param_to_gen i64:$src),
           (cvta_param_64 Int64Regs:$src)>;
 
 // nvvm.ptr.gen.to.param
-def : Pat<(int_nvvm_ptr_gen_to_param Int32Regs:$src),
+def : Pat<(int_nvvm_ptr_gen_to_param i32:$src),
           (IMOV32rr Int32Regs:$src)>;
 
-def : Pat<(int_nvvm_ptr_gen_to_param Int64Regs:$src),
+def : Pat<(int_nvvm_ptr_gen_to_param i64:$src),
           (IMOV64rr Int64Regs:$src)>;
 
 // nvvm.move intrinsic
 def nvvm_move_i16 : NVPTXInst<(outs Int16Regs:$r), (ins Int16Regs:$s),
                              "mov.b16 \t$r, $s;",
-                             [(set Int16Regs:$r,
-                               (int_nvvm_move_i16 Int16Regs:$s))]>;
+                             [(set i16:$r,
+                               (int_nvvm_move_i16 i16:$s))]>;
 def nvvm_move_i32 : NVPTXInst<(outs Int32Regs:$r), (ins Int32Regs:$s),
                              "mov.b32 \t$r, $s;",
-                             [(set Int32Regs:$r,
-                               (int_nvvm_move_i32 Int32Regs:$s))]>;
+                             [(set i32:$r,
+                               (int_nvvm_move_i32 i32:$s))]>;
 def nvvm_move_i64 : NVPTXInst<(outs Int64Regs:$r), (ins Int64Regs:$s),
                              "mov.b64 \t$r, $s;",
-                             [(set Int64Regs:$r,
-                               (int_nvvm_move_i64 Int64Regs:$s))]>;
+                             [(set i64:$r,
+                               (int_nvvm_move_i64 i64:$s))]>;
 def nvvm_move_float : NVPTXInst<(outs Float32Regs:$r), (ins Float32Regs:$s),
                              "mov.f32 \t$r, $s;",
-                             [(set Float32Regs:$r,
-                               (int_nvvm_move_float Float32Regs:$s))]>;
+                             [(set f32:$r,
+                               (int_nvvm_move_float f32:$s))]>;
 def nvvm_move_double : NVPTXInst<(outs Float64Regs:$r), (ins Float64Regs:$s),
                              "mov.f64 \t$r, $s;",
-                             [(set Float64Regs:$r,
-                               (int_nvvm_move_double Float64Regs:$s))]>;
+                             [(set f64:$r,
+                               (int_nvvm_move_double f64:$s))]>;
 def nvvm_move_ptr32 : NVPTXInst<(outs Int32Regs:$r), (ins Int32Regs:$s),
                              "mov.u32 \t$r, $s;",
-                             [(set Int32Regs:$r,
-                               (int_nvvm_move_ptr Int32Regs:$s))]>;
+                             [(set i32:$r,
+                               (int_nvvm_move_ptr i32:$s))]>;
 def nvvm_move_ptr64 : NVPTXInst<(outs Int64Regs:$r), (ins Int64Regs:$s),
                              "mov.u64 \t$r, $s;",
-                             [(set Int64Regs:$r,
-                               (int_nvvm_move_ptr Int64Regs:$s))]>;
+                             [(set i64:$r,
+                               (int_nvvm_move_ptr i64:$s))]>;
 
 // @TODO: Are these actually needed, or will we always just see symbols
 // copied to registers first?
@@ -2860,16 +2860,16 @@ def texsurf_handles
 
 def INT_NVVM_COMPILER_WARN_32 : NVPTXInst<(outs), (ins Int32Regs:$a),
                 "// llvm.nvvm.compiler.warn()",
-                [(int_nvvm_compiler_warn Int32Regs:$a)]>;
+                [(int_nvvm_compiler_warn i32:$a)]>;
 def INT_NVVM_COMPILER_WARN_64 : NVPTXInst<(outs), (ins Int64Regs:$a),
                 "// llvm.nvvm.compiler.warn()",
-                [(int_nvvm_compiler_warn Int64Regs:$a)]>;
+                [(int_nvvm_compiler_warn i64:$a)]>;
 def INT_NVVM_COMPILER_ERROR_32 : NVPTXInst<(outs), (ins Int32Regs:$a),
                 "// llvm.nvvm.compiler.error()",
-                [(int_nvvm_compiler_error Int32Regs:$a)]>;
+                [(int_nvvm_compiler_error i32:$a)]>;
 def INT_NVVM_COMPILER_ERROR_64 : NVPTXInst<(outs), (ins Int64Regs:$a),
                 "// llvm.nvvm.compiler.error()",
-                [(int_nvvm_compiler_error Int64Regs:$a)]>;
+                [(int_nvvm_compiler_error i64:$a)]>;
 
 
 // isspacep
@@ -2877,11 +2877,11 @@ def INT_NVVM_COMPILER_ERROR_64 : NVPTXInst<(outs), (ins Int64Regs:$a),
 multiclass ISSPACEP<string suffix, Intrinsic Intr, list<Predicate> Preds = []> {
   def _32: NVPTXInst<(outs Int1Regs:$d), (ins Int32Regs:$a),
               "isspacep." # suffix # "\t$d, $a;",
-              [(set Int1Regs:$d, (Intr Int32Regs:$a))]>,
+              [(set i1:$d, (Intr i32:$a))]>,
     Requires<Preds>;
   def _64: NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
               "isspacep." # suffix # "\t$d, $a;",
-              [(set Int1Regs:$d, (Intr Int64Regs:$a))]>,
+              [(set i1:$d, (Intr i64:$a))]>,
     Requires<Preds>;
 }
 
@@ -2932,7 +2932,7 @@ def : Pat<(int_nvvm_read_ptx_sreg_envreg30), (MOV_SPECIAL ENVREG30)>;
 def : Pat<(int_nvvm_read_ptx_sreg_envreg31), (MOV_SPECIAL ENVREG31)>;
 
 
-def : Pat<(int_nvvm_swap_lo_hi_b64 Int64Regs:$src),
+def : Pat<(int_nvvm_swap_lo_hi_b64 i64:$src),
           (V2I32toI64 (I64toI32H Int64Regs:$src),
                       (I64toI32L Int64Regs:$src))> ;
 
@@ -5039,22 +5039,22 @@ def TXQ_NUM_MIPMAP_LEVELS_I
               []>;
 }
 
-def : Pat<(int_nvvm_txq_channel_order Int64Regs:$a),
-          (TXQ_CHANNEL_ORDER_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_txq_channel_data_type Int64Regs:$a),
-          (TXQ_CHANNEL_DATA_TYPE_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_txq_width Int64Regs:$a),
-          (TXQ_WIDTH_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_txq_height Int64Regs:$a),
-          (TXQ_HEIGHT_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_txq_depth Int64Regs:$a),
-          (TXQ_DEPTH_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_txq_array_size Int64Regs:$a),
-          (TXQ_ARRAY_SIZE_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_txq_num_samples Int64Regs:$a),
-          (TXQ_NUM_SAMPLES_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_txq_num_mipmap_levels Int64Regs:$a),
-          (TXQ_NUM_MIPMAP_LEVELS_R Int64Regs:$a)>;
+def : Pat<(int_nvvm_txq_channel_order i64:$a),
+          (TXQ_CHANNEL_ORDER_R i64:$a)>;
+def : Pat<(int_nvvm_txq_channel_data_type i64:$a),
+          (TXQ_CHANNEL_DATA_TYPE_R i64:$a)>;
+def : Pat<(int_nvvm_txq_width i64:$a),
+          (TXQ_WIDTH_R i64:$a)>;
+def : Pat<(int_nvvm_txq_height i64:$a),
+          (TXQ_HEIGHT_R i64:$a)>;
+def : Pat<(int_nvvm_txq_depth i64:$a),
+          (TXQ_DEPTH_R i64:$a)>;
+def : Pat<(int_nvvm_txq_array_size i64:$a),
+          (TXQ_ARRAY_SIZE_R i64:$a)>;
+def : Pat<(int_nvvm_txq_num_samples i64:$a),
+          (TXQ_NUM_SAMPLES_R i64:$a)>;
+def : Pat<(int_nvvm_txq_num_mipmap_levels i64:$a),
+          (TXQ_NUM_MIPMAP_LEVELS_R i64:$a)>;
 
 
 //-----------------------------------
@@ -5112,17 +5112,17 @@ def SUQ_ARRAY_SIZE_I
               []>;
 }
 
-def : Pat<(int_nvvm_suq_channel_order Int64Regs:$a),
+def : Pat<(int_nvvm_suq_channel_order i64:$a),
           (SUQ_CHANNEL_ORDER_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_suq_channel_data_type Int64Regs:$a),
+def : Pat<(int_nvvm_suq_channel_data_type i64:$a),
           (SUQ_CHANNEL_DATA_TYPE_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_suq_width Int64Regs:$a),
+def : Pat<(int_nvvm_suq_width i64:$a),
           (SUQ_WIDTH_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_suq_height Int64Regs:$a),
+def : Pat<(int_nvvm_suq_height i64:$a),
           (SUQ_HEIGHT_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_suq_depth Int64Regs:$a),
+def : Pat<(int_nvvm_suq_depth i64:$a),
           (SUQ_DEPTH_R Int64Regs:$a)>;
-def : Pat<(int_nvvm_suq_array_size Int64Regs:$a),
+def : Pat<(int_nvvm_suq_array_size i64:$a),
           (SUQ_ARRAY_SIZE_R Int64Regs:$a)>;
 
 
@@ -5132,15 +5132,15 @@ def : Pat<(int_nvvm_suq_array_size Int64Regs:$a),
 def ISTYPEP_SAMPLER
   : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
               "istypep.samplerref \t$d, $a;",
-              [(set Int1Regs:$d, (int_nvvm_istypep_sampler Int64Regs:$a))]>;
+              [(set i1:$d, (int_nvvm_istypep_sampler i64:$a))]>;
 def ISTYPEP_SURFACE
   : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
               "istypep.surfref \t$d, $a;",
-              [(set Int1Regs:$d, (int_nvvm_istypep_surface Int64Regs:$a))]>;
+              [(set i1:$d, (int_nvvm_istypep_surface i64:$a))]>;
 def ISTYPEP_TEXTURE
   : NVPTXInst<(outs Int1Regs:$d), (ins Int64Regs:$a),
               "istypep.texref \t$d, $a;",
-              [(set Int1Regs:$d, (int_nvvm_istypep_texture Int64Regs:$a))]>;
+              [(set i1:$d, (int_nvvm_istypep_texture i64:$a))]>;
 
 //===- Surface Stores -----------------------------------------------------===//
 
@@ -6931,13 +6931,13 @@ def : Pat<(int_nvvm_sust_p_3d_v4i32_trap
 class PTX_READ_SREG_R64<string regname, Intrinsic intop, list<Predicate> Preds=[]>
   : NVPTXInst<(outs Int64Regs:$d), (ins),
               !strconcat("mov.u64 \t$d, %", regname, ";"),
-              [(set Int64Regs:$d, (intop))]>,
+              [(set i64:$d, (intop))]>,
     Requires<Preds>;
 
 class PTX_READ_SREG_R32<string regname, Intrinsic intop, list<Predicate> Preds=[]>
   : NVPTXInst<(outs Int32Regs:$d), (ins),
               !strconcat("mov.u32 \t$d, %", regname, ";"),
-              [(set Int32Regs:$d, (intop))]>,
+              [(set i32:$d, (intop))]>,
     Requires<Preds>;
 
 multiclass PTX_READ_SREG_R32V4<string regname, list<Predicate> Preds=[]> {
@@ -7019,7 +7019,7 @@ def INT_PTX_SREG_PM3 : PTX_READ_SREG_R32<"pm3", int_nvvm_read_ptx_sreg_pm3>;
 // handle the constant.
 def INT_PTX_SREG_WARPSIZE :
     NVPTXInst<(outs Int32Regs:$dst), (ins), "mov.u32 \t$dst, WARP_SZ;",
-              [(set Int32Regs:$dst, (int_nvvm_read_ptx_sreg_warpsize))]>;
+              [(set i32:$dst, (int_nvvm_read_ptx_sreg_warpsize))]>;
 
 // Helper class that represents a 'fragment' of an NVPTX *MMA instruction.
 // In addition to target-independent fields provided by WMMA_REGS, it adds
@@ -7431,19 +7431,19 @@ foreach mma = !listconcat(MMAs, WMMAs, MMA_LDSTs, LDMATRIXs) in
 multiclass MAPA<string suffix, Intrinsic Intr> {
   def _32: NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a, Int32Regs:$b),
               "mapa" # suffix # ".u32\t$d, $a, $b;",
-              [(set Int32Regs:$d, (Intr Int32Regs:$a, Int32Regs:$b))]>,
+              [(set i32:$d, (Intr i32:$a, i32:$b))]>,
     Requires<[hasSM<90>, hasPTX<78>]>;
   def _32i: NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a, i32imm:$b),
               "mapa" # suffix # ".u32\t$d, $a, $b;",
-              [(set Int32Regs:$d, (Intr Int32Regs:$a, imm:$b))]>,
+              [(set i32:$d, (Intr i32:$a, imm:$b))]>,
     Requires<[hasSM<90>, hasPTX<78>]>;
   def _64: NVPTXInst<(outs Int64Regs:$d), (ins Int64Regs:$a, Int32Regs:$b),
               "mapa" # suffix # ".u64\t$d, $a, $b;",
-              [(set Int64Regs:$d, (Intr Int64Regs:$a, Int32Regs:$b))]>,
+              [(set i64:$d, (Intr i64:$a, i32:$b))]>,
     Requires<[hasSM<90>, hasPTX<78>]>;
   def _64i: NVPTXInst<(outs Int64Regs:$d), (ins Int64Regs:$a, i32imm:$b),
               "mapa" # suffix # ".u64\t$d, $a, $b;",
-              [(set Int64Regs:$d, (Intr Int64Regs:$a, imm:$b))]>,
+              [(set i64:$d, (Intr i64:$a, imm:$b))]>,
     Requires<[hasSM<90>, hasPTX<78>]>;
 }
 
@@ -7454,11 +7454,11 @@ defm mapa_shared_cluster  : MAPA<".shared::cluster", int_nvvm_mapa_shared_cluste
 multiclass GETCTARANK<string suffix, Intrinsic Intr> {
   def _32: NVPTXInst<(outs Int32Regs:$d), (ins Int32Regs:$a),
               "getctarank" # suffix # ".u32\t$d, $a;",
-              [(set Int32Regs:$d, (Intr Int32Regs:$a))]>,
+              [(set i32:$d, (Intr i32:$a))]>,
     Requires<[hasSM<90>, hasPTX<78>]>;
   def _64: NVPTXInst<(outs Int32Regs:$d), (ins Int64Regs:$a),
               "getctarank" # suffix # ".u64\t$d, $a;",
-              [(set Int32Regs:$d, (Intr Int64Regs:$a))]>,
+              [(set i32:$d, (Intr i64:$a))]>,
     Requires<[hasSM<90>, hasPTX<78>]>;
 }
 
@@ -7467,7 +7467,7 @@ defm getctarank_shared_cluster  : GETCTARANK<".shared::cluster", int_nvvm_getcta
 
 def is_explicit_cluster: NVPTXInst<(outs Int1Regs:$d), (ins),
               "mov.pred\t$d, %is_explicit_cluster;",
-              [(set Int1Regs:$d, (int_nvvm_is_explicit_cluster))]>,
+              [(set i1:$d, (int_nvvm_is_explicit_cluster))]>,
     Requires<[hasSM<90>, hasPTX<78>]>;
 
 // setmaxnreg inc/dec intrinsics
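
The hunks above all apply the same mechanical rewrite. As a minimal illustration only (a sketch, not part of the commit; the instruction name and pattern are hypothetical), the same kind of definition written in the old register-class style and in the preferred ValueType style looks like this:

// Illustrative only -- "my_add_example" is a made-up instruction, not taken
// from this change.  The old style spelled the source pattern with register
// classes, plus explicit casts where one class carries several types:
//
//   [(set Int32Regs:$dst, (add (i32 Int32Regs:$a), (i32 Int32Regs:$b)))]
//
// The preferred style names only the ValueTypes; the register classes stay
// in the outs/ins dags, so operand selection is unchanged:
def my_add_example :
  NVPTXInst<(outs Int32Regs:$dst), (ins Int32Regs:$a, Int32Regs:$b),
            "add.s32 \t$dst, $a, $b;",
            [(set i32:$dst, (add i32:$a, i32:$b))]>;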


        

