[llvm] 4b24ab4 - Reland "[NVPTX] Add folding for cvt.rn.bf16x2.f32" (#116417)

via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 6 13:30:12 PST 2024


Author: Alex MacLean
Date: 2024-12-06T13:30:09-08:00
New Revision: 4b24ab4be9351ef822fd8fd546237eabd8c3ba57

URL: https://github.com/llvm/llvm-project/commit/4b24ab4be9351ef822fd8fd546237eabd8c3ba57
DIFF: https://github.com/llvm/llvm-project/commit/4b24ab4be9351ef822fd8fd546237eabd8c3ba57.diff

LOG: Reland "[NVPTX] Add folding for cvt.rn.bf16x2.f32" (#116417)

Reland https://github.com/llvm/llvm-project/pull/116109.

Fixes an issue where the operands were flipped.

Per the PTX spec, a mov instruction packs the first operand as low, and
the second operand as high:
> ```
> // pack two 16-bit elements into .b32
> d = a.x | (a.y << 16)
> ```
On the other hand, cvt.rn.f16x2.f32 instructions take the high, then the low
operand:
> For .f16x2 and .bf16x2 instruction type, two inputs a and b of .f32
type are converted into .f16 or .bf16 type and the converted values are
packed in the destination register d, such that the value converted from
input a is stored in the upper half of d and the value converted from
input b is stored in the lower half of d

Added: 
    

Modified: 
    llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
    llvm/test/CodeGen/NVPTX/bf16-instructions.ll
    llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll
    llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
    llvm/test/CodeGen/NVPTX/convert-sm80.ll
    llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
    llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
    llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
index b8a55851e8d3e1..66684dbff6b335 100644
--- a/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
+++ b/llvm/lib/Target/NVPTX/NVPTXInstrInfo.td
@@ -727,6 +727,20 @@ let hasSideEffects = false in {
   def CVT_f16x2_e5m2x2 : CVT_f16x2_fp8<"e5m2">;
 }
 
+def fpround_oneuse : PatFrag<(ops node:$a), (fpround node:$a), [{
+  return N->hasOneUse();
+}]>;
+
+def : Pat<(v2bf16 (build_vector (bf16 (fpround_oneuse Float32Regs:$lo)),
+                                (bf16 (fpround_oneuse Float32Regs:$hi)))),
+          (CVT_bf16x2_f32 Float32Regs:$hi, Float32Regs:$lo, CvtRN)>,
+      Requires<[hasPTX<70>, hasSM<80>, hasBF16Math]>;
+
+def : Pat<(v2f16 (build_vector (f16 (fpround_oneuse Float32Regs:$lo)),
+                               (f16 (fpround_oneuse Float32Regs:$hi)))),
+          (CVT_f16x2_f32 Float32Regs:$hi, Float32Regs:$lo, CvtRN)>,
+      Requires<[hasPTX<70>, hasSM<80>, useFP16Math]>;
+
 //-----------------------------------
 // Selection instructions (selp)
 //-----------------------------------

diff  --git a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
index 80815b3ca37c05..eee31be80e9826 100644
--- a/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16-instructions.ll
@@ -204,7 +204,7 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ;
 ; SM80-LABEL: test_faddx2(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<7>;
+; SM80-NEXT:    .reg .b16 %rs<5>;
 ; SM80-NEXT:    .reg .b32 %r<4>;
 ; SM80-NEXT:    .reg .f32 %f<7>;
 ; SM80-EMPTY:
@@ -212,22 +212,20 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    ld.param.b32 %r1, [test_faddx2_param_0];
 ; SM80-NEXT:    ld.param.b32 %r2, [test_faddx2_param_1];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f2, %rs4;
+; SM80-NEXT:    cvt.f32.bf16 %f2, %rs3;
 ; SM80-NEXT:    add.rn.f32 %f3, %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-NEXT:    cvt.f32.bf16 %f4, %rs1;
-; SM80-NEXT:    cvt.f32.bf16 %f5, %rs3;
+; SM80-NEXT:    cvt.f32.bf16 %f4, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f5, %rs4;
 ; SM80-NEXT:    add.rn.f32 %f6, %f5, %f4;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-NEXT:    ret;
 ;
 ; SM80-FTZ-LABEL: test_faddx2(
 ; SM80-FTZ:       {
-; SM80-FTZ-NEXT:    .reg .b16 %rs<7>;
+; SM80-FTZ-NEXT:    .reg .b16 %rs<5>;
 ; SM80-FTZ-NEXT:    .reg .b32 %r<4>;
 ; SM80-FTZ-NEXT:    .reg .f32 %f<7>;
 ; SM80-FTZ-EMPTY:
@@ -235,16 +233,14 @@ define <2 x bfloat> @test_faddx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-FTZ-NEXT:    ld.param.b32 %r1, [test_faddx2_param_0];
 ; SM80-FTZ-NEXT:    ld.param.b32 %r2, [test_faddx2_param_1];
 ; SM80-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f1, %rs2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f1, %rs1;
 ; SM80-FTZ-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f2, %rs4;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f2, %rs3;
 ; SM80-FTZ-NEXT:    add.rn.ftz.f32 %f3, %f2, %f1;
-; SM80-FTZ-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f4, %rs1;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f5, %rs3;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f4, %rs2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f5, %rs4;
 ; SM80-FTZ-NEXT:    add.rn.ftz.f32 %f6, %f5, %f4;
-; SM80-FTZ-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-FTZ-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-FTZ-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-FTZ-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-FTZ-NEXT:    ret;
 ;
@@ -311,7 +307,7 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ;
 ; SM80-LABEL: test_fsubx2(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<7>;
+; SM80-NEXT:    .reg .b16 %rs<5>;
 ; SM80-NEXT:    .reg .b32 %r<4>;
 ; SM80-NEXT:    .reg .f32 %f<7>;
 ; SM80-EMPTY:
@@ -319,22 +315,20 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    ld.param.b32 %r1, [test_fsubx2_param_0];
 ; SM80-NEXT:    ld.param.b32 %r2, [test_fsubx2_param_1];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f2, %rs4;
+; SM80-NEXT:    cvt.f32.bf16 %f2, %rs3;
 ; SM80-NEXT:    sub.rn.f32 %f3, %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-NEXT:    cvt.f32.bf16 %f4, %rs1;
-; SM80-NEXT:    cvt.f32.bf16 %f5, %rs3;
+; SM80-NEXT:    cvt.f32.bf16 %f4, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f5, %rs4;
 ; SM80-NEXT:    sub.rn.f32 %f6, %f5, %f4;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-NEXT:    ret;
 ;
 ; SM80-FTZ-LABEL: test_fsubx2(
 ; SM80-FTZ:       {
-; SM80-FTZ-NEXT:    .reg .b16 %rs<7>;
+; SM80-FTZ-NEXT:    .reg .b16 %rs<5>;
 ; SM80-FTZ-NEXT:    .reg .b32 %r<4>;
 ; SM80-FTZ-NEXT:    .reg .f32 %f<7>;
 ; SM80-FTZ-EMPTY:
@@ -342,16 +336,14 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-FTZ-NEXT:    ld.param.b32 %r1, [test_fsubx2_param_0];
 ; SM80-FTZ-NEXT:    ld.param.b32 %r2, [test_fsubx2_param_1];
 ; SM80-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f1, %rs2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f1, %rs1;
 ; SM80-FTZ-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f2, %rs4;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f2, %rs3;
 ; SM80-FTZ-NEXT:    sub.rn.ftz.f32 %f3, %f2, %f1;
-; SM80-FTZ-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f4, %rs1;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f5, %rs3;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f4, %rs2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f5, %rs4;
 ; SM80-FTZ-NEXT:    sub.rn.ftz.f32 %f6, %f5, %f4;
-; SM80-FTZ-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-FTZ-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-FTZ-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-FTZ-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-FTZ-NEXT:    ret;
 ;
@@ -418,7 +410,7 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ;
 ; SM80-LABEL: test_fmulx2(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<7>;
+; SM80-NEXT:    .reg .b16 %rs<5>;
 ; SM80-NEXT:    .reg .b32 %r<4>;
 ; SM80-NEXT:    .reg .f32 %f<7>;
 ; SM80-EMPTY:
@@ -426,22 +418,20 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_0];
 ; SM80-NEXT:    ld.param.b32 %r2, [test_fmulx2_param_1];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f2, %rs4;
+; SM80-NEXT:    cvt.f32.bf16 %f2, %rs3;
 ; SM80-NEXT:    mul.rn.f32 %f3, %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-NEXT:    cvt.f32.bf16 %f4, %rs1;
-; SM80-NEXT:    cvt.f32.bf16 %f5, %rs3;
+; SM80-NEXT:    cvt.f32.bf16 %f4, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f5, %rs4;
 ; SM80-NEXT:    mul.rn.f32 %f6, %f5, %f4;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-NEXT:    ret;
 ;
 ; SM80-FTZ-LABEL: test_fmulx2(
 ; SM80-FTZ:       {
-; SM80-FTZ-NEXT:    .reg .b16 %rs<7>;
+; SM80-FTZ-NEXT:    .reg .b16 %rs<5>;
 ; SM80-FTZ-NEXT:    .reg .b32 %r<4>;
 ; SM80-FTZ-NEXT:    .reg .f32 %f<7>;
 ; SM80-FTZ-EMPTY:
@@ -449,16 +439,14 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-FTZ-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_0];
 ; SM80-FTZ-NEXT:    ld.param.b32 %r2, [test_fmulx2_param_1];
 ; SM80-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f1, %rs2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f1, %rs1;
 ; SM80-FTZ-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f2, %rs4;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f2, %rs3;
 ; SM80-FTZ-NEXT:    mul.rn.ftz.f32 %f3, %f2, %f1;
-; SM80-FTZ-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f4, %rs1;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f5, %rs3;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f4, %rs2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f5, %rs4;
 ; SM80-FTZ-NEXT:    mul.rn.ftz.f32 %f6, %f5, %f4;
-; SM80-FTZ-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-FTZ-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-FTZ-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-FTZ-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-FTZ-NEXT:    ret;
 ;
@@ -525,7 +513,7 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ;
 ; SM80-LABEL: test_fdiv(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<7>;
+; SM80-NEXT:    .reg .b16 %rs<5>;
 ; SM80-NEXT:    .reg .b32 %r<4>;
 ; SM80-NEXT:    .reg .f32 %f<7>;
 ; SM80-EMPTY:
@@ -533,22 +521,20 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    ld.param.b32 %r1, [test_fdiv_param_0];
 ; SM80-NEXT:    ld.param.b32 %r2, [test_fdiv_param_1];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f2, %rs4;
+; SM80-NEXT:    cvt.f32.bf16 %f2, %rs3;
 ; SM80-NEXT:    div.rn.f32 %f3, %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-NEXT:    cvt.f32.bf16 %f4, %rs1;
-; SM80-NEXT:    cvt.f32.bf16 %f5, %rs3;
+; SM80-NEXT:    cvt.f32.bf16 %f4, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f5, %rs4;
 ; SM80-NEXT:    div.rn.f32 %f6, %f5, %f4;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-NEXT:    ret;
 ;
 ; SM80-FTZ-LABEL: test_fdiv(
 ; SM80-FTZ:       {
-; SM80-FTZ-NEXT:    .reg .b16 %rs<7>;
+; SM80-FTZ-NEXT:    .reg .b16 %rs<5>;
 ; SM80-FTZ-NEXT:    .reg .b32 %r<4>;
 ; SM80-FTZ-NEXT:    .reg .f32 %f<7>;
 ; SM80-FTZ-EMPTY:
@@ -556,22 +542,20 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-FTZ-NEXT:    ld.param.b32 %r1, [test_fdiv_param_0];
 ; SM80-FTZ-NEXT:    ld.param.b32 %r2, [test_fdiv_param_1];
 ; SM80-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f1, %rs2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f1, %rs1;
 ; SM80-FTZ-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f2, %rs4;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f2, %rs3;
 ; SM80-FTZ-NEXT:    div.rn.ftz.f32 %f3, %f2, %f1;
-; SM80-FTZ-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f4, %rs1;
-; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f5, %rs3;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f4, %rs2;
+; SM80-FTZ-NEXT:    cvt.ftz.f32.bf16 %f5, %rs4;
 ; SM80-FTZ-NEXT:    div.rn.ftz.f32 %f6, %f5, %f4;
-; SM80-FTZ-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-FTZ-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-FTZ-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-FTZ-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-FTZ-NEXT:    ret;
 ;
 ; SM90-LABEL: test_fdiv(
 ; SM90:       {
-; SM90-NEXT:    .reg .b16 %rs<7>;
+; SM90-NEXT:    .reg .b16 %rs<5>;
 ; SM90-NEXT:    .reg .b32 %r<4>;
 ; SM90-NEXT:    .reg .f32 %f<7>;
 ; SM90-EMPTY:
@@ -579,16 +563,14 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM90-NEXT:    ld.param.b32 %r1, [test_fdiv_param_0];
 ; SM90-NEXT:    ld.param.b32 %r2, [test_fdiv_param_1];
 ; SM90-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM90-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM90-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM90-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM90-NEXT:    cvt.f32.bf16 %f2, %rs4;
+; SM90-NEXT:    cvt.f32.bf16 %f2, %rs3;
 ; SM90-NEXT:    div.rn.f32 %f3, %f2, %f1;
-; SM90-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM90-NEXT:    cvt.f32.bf16 %f4, %rs1;
-; SM90-NEXT:    cvt.f32.bf16 %f5, %rs3;
+; SM90-NEXT:    cvt.f32.bf16 %f4, %rs2;
+; SM90-NEXT:    cvt.f32.bf16 %f5, %rs4;
 ; SM90-NEXT:    div.rn.f32 %f6, %f5, %f4;
-; SM90-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM90-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM90-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM90-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM90-NEXT:    ret;
   %r = fdiv <2 x bfloat> %a, %b

diff  --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll
index 3e54aaf5580729..b317697210850a 100644
--- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions-approx.ll
@@ -10,20 +10,18 @@ declare <2 x bfloat> @llvm.cos.f16(<2 x bfloat> %a) #0
 define <2 x bfloat> @test_sin(<2 x bfloat> %a) #0 #1 {
 ; CHECK-LABEL: test_sin(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b16 %rs<5>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
 ; CHECK-NEXT:    .reg .b32 %r<3>;
 ; CHECK-NEXT:    .reg .f32 %f<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_sin_param_0];
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; CHECK-NEXT:    sin.approx.f32 %f2, %f1;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs3, %f2;
-; CHECK-NEXT:    cvt.f32.bf16 %f3, %rs1;
+; CHECK-NEXT:    cvt.f32.bf16 %f3, %rs2;
 ; CHECK-NEXT:    sin.approx.f32 %f4, %f3;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs4, %f4;
-; CHECK-NEXT:    mov.b32 %r2, {%rs4, %rs3};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r2, %f4, %f2;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r2;
 ; CHECK-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.sin.f16(<2 x bfloat> %a)
@@ -33,20 +31,18 @@ define <2 x bfloat> @test_sin(<2 x bfloat> %a) #0 #1 {
 define <2 x bfloat> @test_cos(<2 x bfloat> %a) #0 #1 {
 ; CHECK-LABEL: test_cos(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b16 %rs<5>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
 ; CHECK-NEXT:    .reg .b32 %r<3>;
 ; CHECK-NEXT:    .reg .f32 %f<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_cos_param_0];
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; CHECK-NEXT:    cos.approx.f32 %f2, %f1;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs3, %f2;
-; CHECK-NEXT:    cvt.f32.bf16 %f3, %rs1;
+; CHECK-NEXT:    cvt.f32.bf16 %f3, %rs2;
 ; CHECK-NEXT:    cos.approx.f32 %f4, %f3;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs4, %f4;
-; CHECK-NEXT:    mov.b32 %r2, {%rs4, %rs3};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r2, %f4, %f2;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r2;
 ; CHECK-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.cos.f16(<2 x bfloat> %a)

diff  --git a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
index e545d4c1177915..7f29676686a26d 100644
--- a/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/bf16x2-instructions.ll
@@ -22,20 +22,18 @@ define <2 x bfloat> @test_ret_const() #0 {
 define <2 x bfloat> @test_fadd_imm_0(<2 x bfloat> %a) #0 {
 ; SM80-LABEL: test_fadd_imm_0(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<5>;
+; SM80-NEXT:    .reg .b16 %rs<3>;
 ; SM80-NEXT:    .reg .b32 %r<3>;
 ; SM80-NEXT:    .reg .f32 %f<5>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.b32 %r1, [test_fadd_imm_0_param_0];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
-; SM80-NEXT:    add.rn.f32 %f2, %f1, 0f40000000;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs3, %f2;
-; SM80-NEXT:    cvt.f32.bf16 %f3, %rs1;
-; SM80-NEXT:    add.rn.f32 %f4, %f3, 0f3F800000;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs4, %f4;
-; SM80-NEXT:    mov.b32 %r2, {%rs4, %rs3};
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
+; SM80-NEXT:    add.rn.f32 %f2, %f1, 0f3F800000;
+; SM80-NEXT:    cvt.f32.bf16 %f3, %rs2;
+; SM80-NEXT:    add.rn.f32 %f4, %f3, 0f40000000;
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r2, %f4, %f2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r2;
 ; SM80-NEXT:    ret;
 ;
@@ -84,7 +82,7 @@ define bfloat @test_fadd_imm_1(bfloat %a) #0 {
 define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-LABEL: test_fsubx2(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<7>;
+; SM80-NEXT:    .reg .b16 %rs<5>;
 ; SM80-NEXT:    .reg .b32 %r<4>;
 ; SM80-NEXT:    .reg .f32 %f<7>;
 ; SM80-EMPTY:
@@ -92,16 +90,14 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    ld.param.b32 %r1, [test_fsubx2_param_0];
 ; SM80-NEXT:    ld.param.b32 %r2, [test_fsubx2_param_1];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f2, %rs4;
+; SM80-NEXT:    cvt.f32.bf16 %f2, %rs3;
 ; SM80-NEXT:    sub.rn.f32 %f3, %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-NEXT:    cvt.f32.bf16 %f4, %rs1;
-; SM80-NEXT:    cvt.f32.bf16 %f5, %rs3;
+; SM80-NEXT:    cvt.f32.bf16 %f4, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f5, %rs4;
 ; SM80-NEXT:    sub.rn.f32 %f6, %f5, %f4;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-NEXT:    ret;
 ;
@@ -122,7 +118,7 @@ define <2 x bfloat> @test_fsubx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-LABEL: test_fmulx2(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<7>;
+; SM80-NEXT:    .reg .b16 %rs<5>;
 ; SM80-NEXT:    .reg .b32 %r<4>;
 ; SM80-NEXT:    .reg .f32 %f<7>;
 ; SM80-EMPTY:
@@ -130,16 +126,14 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    ld.param.b32 %r1, [test_fmulx2_param_0];
 ; SM80-NEXT:    ld.param.b32 %r2, [test_fmulx2_param_1];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f2, %rs4;
+; SM80-NEXT:    cvt.f32.bf16 %f2, %rs3;
 ; SM80-NEXT:    mul.rn.f32 %f3, %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; SM80-NEXT:    cvt.f32.bf16 %f4, %rs1;
-; SM80-NEXT:    cvt.f32.bf16 %f5, %rs3;
+; SM80-NEXT:    cvt.f32.bf16 %f4, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f5, %rs4;
 ; SM80-NEXT:    mul.rn.f32 %f6, %f5, %f4;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; SM80-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r3;
 ; SM80-NEXT:    ret;
 ;
@@ -160,7 +154,7 @@ define <2 x bfloat> @test_fmulx2(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; CHECK-LABEL: test_fdiv(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b16 %rs<7>;
+; CHECK-NEXT:    .reg .b16 %rs<5>;
 ; CHECK-NEXT:    .reg .b32 %r<4>;
 ; CHECK-NEXT:    .reg .f32 %f<7>;
 ; CHECK-EMPTY:
@@ -168,16 +162,14 @@ define <2 x bfloat> @test_fdiv(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_fdiv_param_0];
 ; CHECK-NEXT:    ld.param.b32 %r2, [test_fdiv_param_1];
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; CHECK-NEXT:    mov.b32 {%rs3, %rs4}, %r1;
-; CHECK-NEXT:    cvt.f32.bf16 %f2, %rs4;
+; CHECK-NEXT:    cvt.f32.bf16 %f2, %rs3;
 ; CHECK-NEXT:    div.rn.f32 %f3, %f2, %f1;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs5, %f3;
-; CHECK-NEXT:    cvt.f32.bf16 %f4, %rs1;
-; CHECK-NEXT:    cvt.f32.bf16 %f5, %rs3;
+; CHECK-NEXT:    cvt.f32.bf16 %f4, %rs2;
+; CHECK-NEXT:    cvt.f32.bf16 %f5, %rs4;
 ; CHECK-NEXT:    div.rn.f32 %f6, %f5, %f4;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs6, %f6;
-; CHECK-NEXT:    mov.b32 %r3, {%rs6, %rs5};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r3, %f6, %f3;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r3;
 ; CHECK-NEXT:    ret;
   %r = fdiv <2 x bfloat> %a, %b
@@ -418,15 +410,12 @@ define <2 x bfloat> @test_select_cc_bf16_f32(<2 x bfloat> %a, <2 x bfloat> %b,
 define <2 x bfloat> @test_fptrunc_2xfloat(<2 x float> %a) #0 {
 ; CHECK-LABEL: test_fptrunc_2xfloat(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b16 %rs<3>;
 ; CHECK-NEXT:    .reg .b32 %r<2>;
 ; CHECK-NEXT:    .reg .f32 %f<3>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.v2.f32 {%f1, %f2}, [test_fptrunc_2xfloat_param_0];
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs1, %f2;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs2, %f1;
-; CHECK-NEXT:    mov.b32 %r1, {%rs2, %rs1};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r1, %f2, %f1;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
 ; CHECK-NEXT:    ret;
   %r = fptrunc <2 x float> %a to <2 x bfloat>
@@ -503,20 +492,18 @@ declare <2 x bfloat> @llvm.fmuladd.f16(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bf
 define <2 x bfloat> @test_sqrt(<2 x bfloat> %a) #0 {
 ; CHECK-LABEL: test_sqrt(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b16 %rs<5>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
 ; CHECK-NEXT:    .reg .b32 %r<3>;
 ; CHECK-NEXT:    .reg .f32 %f<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_sqrt_param_0];
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; CHECK-NEXT:    sqrt.rn.f32 %f2, %f1;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs3, %f2;
-; CHECK-NEXT:    cvt.f32.bf16 %f3, %rs1;
+; CHECK-NEXT:    cvt.f32.bf16 %f3, %rs2;
 ; CHECK-NEXT:    sqrt.rn.f32 %f4, %f3;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs4, %f4;
-; CHECK-NEXT:    mov.b32 %r2, {%rs4, %rs3};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r2, %f4, %f2;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r2;
 ; CHECK-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.sqrt.f16(<2 x bfloat> %a)
@@ -556,7 +543,7 @@ define <2 x bfloat> @test_fabs(<2 x bfloat> %a) #0 {
 define <2 x bfloat> @test_fabs_add(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-LABEL: test_fabs_add(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<11>;
+; SM80-NEXT:    .reg .b16 %rs<7>;
 ; SM80-NEXT:    .reg .b32 %r<6>;
 ; SM80-NEXT:    .reg .f32 %f<11>;
 ; SM80-EMPTY:
@@ -564,25 +551,21 @@ define <2 x bfloat> @test_fabs_add(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 ; SM80-NEXT:    ld.param.b32 %r1, [test_fabs_add_param_1];
 ; SM80-NEXT:    ld.param.b32 %r2, [test_fabs_add_param_0];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r2;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    add.rn.f32 %f2, %f1, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs3, %f2;
-; SM80-NEXT:    cvt.f32.bf16 %f3, %rs1;
+; SM80-NEXT:    cvt.f32.bf16 %f3, %rs2;
 ; SM80-NEXT:    add.rn.f32 %f4, %f3, %f3;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs4, %f4;
-; SM80-NEXT:    mov.b32 %r3, {%rs4, %rs3};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r3, %f4, %f2;
 ; SM80-NEXT:    abs.bf16x2 %r4, %r3;
-; SM80-NEXT:    mov.b32 {%rs5, %rs6}, %r4;
-; SM80-NEXT:    cvt.f32.bf16 %f5, %rs6;
-; SM80-NEXT:    mov.b32 {%rs7, %rs8}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f6, %rs8;
+; SM80-NEXT:    mov.b32 {%rs3, %rs4}, %r4;
+; SM80-NEXT:    cvt.f32.bf16 %f5, %rs3;
+; SM80-NEXT:    mov.b32 {%rs5, %rs6}, %r1;
+; SM80-NEXT:    cvt.f32.bf16 %f6, %rs5;
 ; SM80-NEXT:    add.rn.f32 %f7, %f5, %f6;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs9, %f7;
-; SM80-NEXT:    cvt.f32.bf16 %f8, %rs5;
-; SM80-NEXT:    cvt.f32.bf16 %f9, %rs7;
+; SM80-NEXT:    cvt.f32.bf16 %f8, %rs4;
+; SM80-NEXT:    cvt.f32.bf16 %f9, %rs6;
 ; SM80-NEXT:    add.rn.f32 %f10, %f8, %f9;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs10, %f10;
-; SM80-NEXT:    mov.b32 %r5, {%rs10, %rs9};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r5, %f10, %f7;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r5;
 ; SM80-NEXT:    ret;
 ;
@@ -637,20 +620,18 @@ define <2 x bfloat> @test_maxnum(<2 x bfloat> %a, <2 x bfloat> %b) #0 {
 define <2 x bfloat> @test_floor(<2 x bfloat> %a) #0 {
 ; SM80-LABEL: test_floor(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<5>;
+; SM80-NEXT:    .reg .b16 %rs<3>;
 ; SM80-NEXT:    .reg .b32 %r<3>;
 ; SM80-NEXT:    .reg .f32 %f<5>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.b32 %r1, [test_floor_param_0];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    cvt.rmi.f32.f32 %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs3, %f2;
-; SM80-NEXT:    cvt.f32.bf16 %f3, %rs1;
+; SM80-NEXT:    cvt.f32.bf16 %f3, %rs2;
 ; SM80-NEXT:    cvt.rmi.f32.f32 %f4, %f3;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs4, %f4;
-; SM80-NEXT:    mov.b32 %r2, {%rs4, %rs3};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r2, %f4, %f2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r2;
 ; SM80-NEXT:    ret;
 ;
@@ -674,20 +655,18 @@ define <2 x bfloat> @test_floor(<2 x bfloat> %a) #0 {
 define <2 x bfloat> @test_ceil(<2 x bfloat> %a) #0 {
 ; SM80-LABEL: test_ceil(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<5>;
+; SM80-NEXT:    .reg .b16 %rs<3>;
 ; SM80-NEXT:    .reg .b32 %r<3>;
 ; SM80-NEXT:    .reg .f32 %f<5>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.b32 %r1, [test_ceil_param_0];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    cvt.rpi.f32.f32 %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs3, %f2;
-; SM80-NEXT:    cvt.f32.bf16 %f3, %rs1;
+; SM80-NEXT:    cvt.f32.bf16 %f3, %rs2;
 ; SM80-NEXT:    cvt.rpi.f32.f32 %f4, %f3;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs4, %f4;
-; SM80-NEXT:    mov.b32 %r2, {%rs4, %rs3};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r2, %f4, %f2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r2;
 ; SM80-NEXT:    ret;
 ;
@@ -711,20 +690,18 @@ define <2 x bfloat> @test_ceil(<2 x bfloat> %a) #0 {
 define <2 x bfloat> @test_trunc(<2 x bfloat> %a) #0 {
 ; SM80-LABEL: test_trunc(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<5>;
+; SM80-NEXT:    .reg .b16 %rs<3>;
 ; SM80-NEXT:    .reg .b32 %r<3>;
 ; SM80-NEXT:    .reg .f32 %f<5>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.b32 %r1, [test_trunc_param_0];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    cvt.rzi.f32.f32 %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs3, %f2;
-; SM80-NEXT:    cvt.f32.bf16 %f3, %rs1;
+; SM80-NEXT:    cvt.f32.bf16 %f3, %rs2;
 ; SM80-NEXT:    cvt.rzi.f32.f32 %f4, %f3;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs4, %f4;
-; SM80-NEXT:    mov.b32 %r2, {%rs4, %rs3};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r2, %f4, %f2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r2;
 ; SM80-NEXT:    ret;
 ;
@@ -748,20 +725,18 @@ define <2 x bfloat> @test_trunc(<2 x bfloat> %a) #0 {
 define <2 x bfloat> @test_rint(<2 x bfloat> %a) #0 {
 ; SM80-LABEL: test_rint(
 ; SM80:       {
-; SM80-NEXT:    .reg .b16 %rs<5>;
+; SM80-NEXT:    .reg .b16 %rs<3>;
 ; SM80-NEXT:    .reg .b32 %r<3>;
 ; SM80-NEXT:    .reg .f32 %f<5>;
 ; SM80-EMPTY:
 ; SM80-NEXT:  // %bb.0:
 ; SM80-NEXT:    ld.param.b32 %r1, [test_rint_param_0];
 ; SM80-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; SM80-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; SM80-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; SM80-NEXT:    cvt.rni.f32.f32 %f2, %f1;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs3, %f2;
-; SM80-NEXT:    cvt.f32.bf16 %f3, %rs1;
+; SM80-NEXT:    cvt.f32.bf16 %f3, %rs2;
 ; SM80-NEXT:    cvt.rni.f32.f32 %f4, %f3;
-; SM80-NEXT:    cvt.rn.bf16.f32 %rs4, %f4;
-; SM80-NEXT:    mov.b32 %r2, {%rs4, %rs3};
+; SM80-NEXT:    cvt.rn.bf16x2.f32 %r2, %f4, %f2;
 ; SM80-NEXT:    st.param.b32 [func_retval0], %r2;
 ; SM80-NEXT:    ret;
 ;
@@ -786,14 +761,14 @@ define <2 x bfloat> @test_round(<2 x bfloat> %a) #0 {
 ; CHECK-LABEL: test_round(
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .pred %p<5>;
-; CHECK-NEXT:    .reg .b16 %rs<5>;
+; CHECK-NEXT:    .reg .b16 %rs<3>;
 ; CHECK-NEXT:    .reg .b32 %r<9>;
 ; CHECK-NEXT:    .reg .f32 %f<17>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    ld.param.b32 %r1, [test_round_param_0];
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r1;
-; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs2;
+; CHECK-NEXT:    cvt.f32.bf16 %f1, %rs1;
 ; CHECK-NEXT:    mov.b32 %r2, %f1;
 ; CHECK-NEXT:    and.b32 %r3, %r2, -2147483648;
 ; CHECK-NEXT:    or.b32 %r4, %r3, 1056964608;
@@ -806,8 +781,7 @@ define <2 x bfloat> @test_round(<2 x bfloat> %a) #0 {
 ; CHECK-NEXT:    cvt.rzi.f32.f32 %f7, %f1;
 ; CHECK-NEXT:    setp.lt.f32 %p2, %f5, 0f3F000000;
 ; CHECK-NEXT:    selp.f32 %f8, %f7, %f6, %p2;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs3, %f8;
-; CHECK-NEXT:    cvt.f32.bf16 %f9, %rs1;
+; CHECK-NEXT:    cvt.f32.bf16 %f9, %rs2;
 ; CHECK-NEXT:    mov.b32 %r5, %f9;
 ; CHECK-NEXT:    and.b32 %r6, %r5, -2147483648;
 ; CHECK-NEXT:    or.b32 %r7, %r6, 1056964608;
@@ -820,8 +794,7 @@ define <2 x bfloat> @test_round(<2 x bfloat> %a) #0 {
 ; CHECK-NEXT:    cvt.rzi.f32.f32 %f15, %f9;
 ; CHECK-NEXT:    setp.lt.f32 %p4, %f13, 0f3F000000;
 ; CHECK-NEXT:    selp.f32 %f16, %f15, %f14, %p4;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs4, %f16;
-; CHECK-NEXT:    mov.b32 %r8, {%rs4, %rs3};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r8, %f16, %f8;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r8;
 ; CHECK-NEXT:    ret;
   %r = call <2 x bfloat> @llvm.round.f16(<2 x bfloat> %a)

diff  --git a/llvm/test/CodeGen/NVPTX/convert-sm80.ll b/llvm/test/CodeGen/NVPTX/convert-sm80.ll
index 4e30cebfe90251..1106a062f1d565 100644
--- a/llvm/test/CodeGen/NVPTX/convert-sm80.ll
+++ b/llvm/test/CodeGen/NVPTX/convert-sm80.ll
@@ -1,41 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
 ; RUN: llc < %s -march=nvptx64 -mcpu=sm_80 -mattr=+ptx70 | FileCheck %s
 ; RUN: %if ptxas-11.0 %{ llc < %s -march=nvptx64 -mcpu=sm_80 -mattr=+ptx70 | %ptxas-verify -arch=sm_80 %}
 
 
-; CHECK-LABEL: cvt_rn_bf16x2_f32
 define <2 x bfloat> @cvt_rn_bf16x2_f32(float %f1, float %f2) {
-
-; CHECK: cvt.rn.bf16x2.f32
-  %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rn(float %f1, float %f2);
-
-ret <2 x bfloat> %val
+; CHECK-LABEL: cvt_rn_bf16x2_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rn_bf16x2_f32_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [cvt_rn_bf16x2_f32_param_1];
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r1, %f1, %f2;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rn(float %f1, float %f2)
+  ret <2 x bfloat> %val
 }
 
-; CHECK-LABEL: cvt_rn_relu_bf16x2_f32
 define <2 x bfloat> @cvt_rn_relu_bf16x2_f32(float %f1, float %f2) {
-
-; CHECK: cvt.rn.relu.bf16x2.f32
-%val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rn.relu(float %f1, float %f2);
-
-ret <2 x bfloat> %val
+; CHECK-LABEL: cvt_rn_relu_bf16x2_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rn_relu_bf16x2_f32_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [cvt_rn_relu_bf16x2_f32_param_1];
+; CHECK-NEXT:    cvt.rn.relu.bf16x2.f32 %r1, %f1, %f2;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rn.relu(float %f1, float %f2)
+  ret <2 x bfloat> %val
 }
 
-; CHECK-LABEL: cvt_rz_bf16x2_f32
 define <2 x bfloat> @cvt_rz_bf16x2_f32(float %f1, float %f2) {
-
-; CHECK: cvt.rz.bf16x2.f32
-  %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rz(float %f1, float %f2);
-
-ret <2 x bfloat> %val
+; CHECK-LABEL: cvt_rz_bf16x2_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rz_bf16x2_f32_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [cvt_rz_bf16x2_f32_param_1];
+; CHECK-NEXT:    cvt.rz.bf16x2.f32 %r1, %f1, %f2;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rz(float %f1, float %f2)
+  ret <2 x bfloat> %val
 }
 
-; CHECK-LABEL: cvt_rz_relu_bf16x2_f32
 define <2 x bfloat> @cvt_rz_relu_bf16x2_f32(float %f1, float %f2) {
-
-; CHECK: cvt.rz.relu.bf16x2.f32
-%val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rz.relu(float %f1, float %f2);
-
-ret <2 x bfloat> %val
+; CHECK-LABEL: cvt_rz_relu_bf16x2_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rz_relu_bf16x2_f32_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [cvt_rz_relu_bf16x2_f32_param_1];
+; CHECK-NEXT:    cvt.rz.relu.bf16x2.f32 %r1, %f1, %f2;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %val = call <2 x bfloat> @llvm.nvvm.ff2bf16x2.rz.relu(float %f1, float %f2)
+  ret <2 x bfloat> %val
 }
 
 declare <2 x bfloat> @llvm.nvvm.ff2bf16x2.rn(float, float)
@@ -43,40 +72,68 @@ declare <2 x bfloat> @llvm.nvvm.ff2bf16x2.rn.relu(float, float)
 declare <2 x bfloat> @llvm.nvvm.ff2bf16x2.rz(float, float)
 declare <2 x bfloat> @llvm.nvvm.ff2bf16x2.rz.relu(float, float)
 
-; CHECK-LABEL: cvt_rn_f16x2_f32
 define <2 x half> @cvt_rn_f16x2_f32(float %f1, float %f2) {
-
-; CHECK: cvt.rn.f16x2.f32
-  %val = call <2 x half> @llvm.nvvm.ff2f16x2.rn(float %f1, float %f2);
-
-ret <2 x half> %val
+; CHECK-LABEL: cvt_rn_f16x2_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rn_f16x2_f32_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [cvt_rn_f16x2_f32_param_1];
+; CHECK-NEXT:    cvt.rn.f16x2.f32 %r1, %f1, %f2;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %val = call <2 x half> @llvm.nvvm.ff2f16x2.rn(float %f1, float %f2)
+  ret <2 x half> %val
 }
 
-; CHECK-LABEL: cvt_rn_relu_f16x2_f32
 define <2 x half> @cvt_rn_relu_f16x2_f32(float %f1, float %f2) {
-
-; CHECK: cvt.rn.relu.f16x2.f32
-%val = call <2 x half> @llvm.nvvm.ff2f16x2.rn.relu(float %f1, float %f2);
-
-ret <2 x half> %val
+; CHECK-LABEL: cvt_rn_relu_f16x2_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rn_relu_f16x2_f32_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [cvt_rn_relu_f16x2_f32_param_1];
+; CHECK-NEXT:    cvt.rn.relu.f16x2.f32 %r1, %f1, %f2;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %val = call <2 x half> @llvm.nvvm.ff2f16x2.rn.relu(float %f1, float %f2)
+  ret <2 x half> %val
 }
 
-; CHECK-LABEL: cvt_rz_f16x2_f32
 define <2 x half> @cvt_rz_f16x2_f32(float %f1, float %f2) {
-
-; CHECK: cvt.rz.f16x2.f32
-  %val = call <2 x half> @llvm.nvvm.ff2f16x2.rz(float %f1, float %f2);
-
-ret <2 x half> %val
+; CHECK-LABEL: cvt_rz_f16x2_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rz_f16x2_f32_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [cvt_rz_f16x2_f32_param_1];
+; CHECK-NEXT:    cvt.rz.f16x2.f32 %r1, %f1, %f2;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %val = call <2 x half> @llvm.nvvm.ff2f16x2.rz(float %f1, float %f2)
+  ret <2 x half> %val
 }
 
-; CHECK-LABEL: cvt_rz_relu_f16x2_f32
 define <2 x half> @cvt_rz_relu_f16x2_f32(float %f1, float %f2) {
-
-; CHECK: cvt.rz.relu.f16x2.f32
-%val = call <2 x half> @llvm.nvvm.ff2f16x2.rz.relu(float %f1, float %f2);
-
-ret <2 x half> %val
+; CHECK-LABEL: cvt_rz_relu_f16x2_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rz_relu_f16x2_f32_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [cvt_rz_relu_f16x2_f32_param_1];
+; CHECK-NEXT:    cvt.rz.relu.f16x2.f32 %r1, %f1, %f2;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %val = call <2 x half> @llvm.nvvm.ff2f16x2.rz.relu(float %f1, float %f2)
+  ret <2 x half> %val
 }
 
 declare <2 x half> @llvm.nvvm.ff2f16x2.rn(float, float)
@@ -84,40 +141,64 @@ declare <2 x half> @llvm.nvvm.ff2f16x2.rn.relu(float, float)
 declare <2 x half> @llvm.nvvm.ff2f16x2.rz(float, float)
 declare <2 x half> @llvm.nvvm.ff2f16x2.rz.relu(float, float)
 
-; CHECK-LABEL: cvt_rn_bf16_f32
 define bfloat @cvt_rn_bf16_f32(float %f1) {
-
-; CHECK: cvt.rn.bf16.f32
-  %val = call bfloat @llvm.nvvm.f2bf16.rn(float %f1);
-
-ret bfloat %val
+; CHECK-LABEL: cvt_rn_bf16_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<2>;
+; CHECK-NEXT:    .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rn_bf16_f32_param_0];
+; CHECK-NEXT:    cvt.rn.bf16.f32 %rs1, %f1;
+; CHECK-NEXT:    st.param.b16 [func_retval0], %rs1;
+; CHECK-NEXT:    ret;
+  %val = call bfloat @llvm.nvvm.f2bf16.rn(float %f1)
+  ret bfloat %val
 }
 
-; CHECK-LABEL: cvt_rn_relu_bf16_f32
 define bfloat @cvt_rn_relu_bf16_f32(float %f1) {
-
-; CHECK: cvt.rn.relu.bf16.f32
-%val = call bfloat @llvm.nvvm.f2bf16.rn.relu(float %f1);
-
-ret bfloat %val
+; CHECK-LABEL: cvt_rn_relu_bf16_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<2>;
+; CHECK-NEXT:    .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rn_relu_bf16_f32_param_0];
+; CHECK-NEXT:    cvt.rn.relu.bf16.f32 %rs1, %f1;
+; CHECK-NEXT:    st.param.b16 [func_retval0], %rs1;
+; CHECK-NEXT:    ret;
+  %val = call bfloat @llvm.nvvm.f2bf16.rn.relu(float %f1)
+  ret bfloat %val
 }
 
-; CHECK-LABEL: cvt_rz_bf16_f32
 define bfloat @cvt_rz_bf16_f32(float %f1) {
-
-; CHECK: cvt.rz.bf16.f32
-  %val = call bfloat @llvm.nvvm.f2bf16.rz(float %f1);
-
-ret bfloat %val
+; CHECK-LABEL: cvt_rz_bf16_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<2>;
+; CHECK-NEXT:    .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rz_bf16_f32_param_0];
+; CHECK-NEXT:    cvt.rz.bf16.f32 %rs1, %f1;
+; CHECK-NEXT:    st.param.b16 [func_retval0], %rs1;
+; CHECK-NEXT:    ret;
+  %val = call bfloat @llvm.nvvm.f2bf16.rz(float %f1)
+  ret bfloat %val
 }
 
-; CHECK-LABEL: cvt_rz_relu_bf16_f32
 define bfloat @cvt_rz_relu_bf16_f32(float %f1) {
-
-; CHECK: cvt.rz.relu.bf16.f32
-%val = call bfloat @llvm.nvvm.f2bf16.rz.relu(float %f1);
-
-ret bfloat %val
+; CHECK-LABEL: cvt_rz_relu_bf16_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b16 %rs<2>;
+; CHECK-NEXT:    .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rz_relu_bf16_f32_param_0];
+; CHECK-NEXT:    cvt.rz.relu.bf16.f32 %rs1, %f1;
+; CHECK-NEXT:    st.param.b16 [func_retval0], %rs1;
+; CHECK-NEXT:    ret;
+  %val = call bfloat @llvm.nvvm.f2bf16.rz.relu(float %f1)
+  ret bfloat %val
 }
 
 declare bfloat @llvm.nvvm.f2bf16.rn(float)
@@ -125,13 +206,58 @@ declare bfloat @llvm.nvvm.f2bf16.rn.relu(float)
 declare bfloat @llvm.nvvm.f2bf16.rz(float)
 declare bfloat @llvm.nvvm.f2bf16.rz.relu(float)
 
-; CHECK-LABEL: cvt_rna_tf32_f32
 define i32 @cvt_rna_tf32_f32(float %f1) {
+; CHECK-LABEL: cvt_rna_tf32_f32(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [cvt_rna_tf32_f32_param_0];
+; CHECK-NEXT:    cvt.rna.tf32.f32 %r1, %f1;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %val = call i32 @llvm.nvvm.f2tf32.rna(float %f1)
+  ret i32 %val
+}
 
-; CHECK: cvt.rna.tf32.f32
-  %val = call i32 @llvm.nvvm.f2tf32.rna(float %f1);
+declare i32 @llvm.nvvm.f2tf32.rna(float)
 
-ret i32 %val
+
+define <2 x bfloat> @fold_ff2bf16x2(float %lo, float %hi) {
+; CHECK-LABEL: fold_ff2bf16x2(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [fold_ff2bf16x2_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [fold_ff2bf16x2_param_1];
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r1, %f2, %f1;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %loh = fptrunc float %lo to bfloat
+  %hih = fptrunc float %hi to bfloat
+  %v0 = insertelement <2 x bfloat> poison, bfloat %loh, i64 0
+  %v1 = insertelement <2 x bfloat> %v0, bfloat %hih, i64 1
+  ret <2 x bfloat> %v1
 }
 
-declare i32 @llvm.nvvm.f2tf32.rna(float)
+define <2 x half> @fold_ff2f16x2(float %lo, float %hi) {
+; CHECK-LABEL: fold_ff2f16x2(
+; CHECK:       {
+; CHECK-NEXT:    .reg .b32 %r<2>;
+; CHECK-NEXT:    .reg .f32 %f<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT:  // %bb.0:
+; CHECK-NEXT:    ld.param.f32 %f1, [fold_ff2f16x2_param_0];
+; CHECK-NEXT:    ld.param.f32 %f2, [fold_ff2f16x2_param_1];
+; CHECK-NEXT:    cvt.rn.f16x2.f32 %r1, %f2, %f1;
+; CHECK-NEXT:    st.param.b32 [func_retval0], %r1;
+; CHECK-NEXT:    ret;
+  %loh = fptrunc float %lo to half
+  %hih = fptrunc float %hi to half
+  %v0 = insertelement <2 x half> poison, half %loh, i64 0
+  %v1 = insertelement <2 x half> %v0, half %hih, i64 1
+  ret <2 x half> %v1
+}

diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll b/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
index 8cc4548f6e85e0..40771784d1b28b 100644
--- a/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-relu-contract.ll
@@ -959,7 +959,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> %
 define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) #0 {
 ; CHECK-LABEL: fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b16 %rs<15>;
+; CHECK-NEXT:    .reg .b16 %rs<13>;
 ; CHECK-NEXT:    .reg .b32 %r<20>;
 ; CHECK-NEXT:    .reg .f32 %f<11>;
 ; CHECK-EMPTY:
@@ -971,40 +971,38 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
 ; CHECK-NEXT:    mov.b32 %r5, 0;
 ; CHECK-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; CHECK-NEXT:    cvt.u32.u16 %r7, %rs1;
+; CHECK-NEXT:    cvt.u32.u16 %r7, %rs2;
 ; CHECK-NEXT:    shl.b32 %r8, %r7, 16;
 ; CHECK-NEXT:    mov.b32 %f1, %r8;
 ; CHECK-NEXT:    add.f32 %f2, %f1, 0f40E00000;
 ; CHECK-NEXT:    cvt.rn.bf16.f32 %rs4, %f2;
-; CHECK-NEXT:    cvt.u32.u16 %r9, %rs2;
+; CHECK-NEXT:    cvt.u32.u16 %r9, %rs1;
 ; CHECK-NEXT:    shl.b32 %r10, %r9, 16;
 ; CHECK-NEXT:    mov.b32 %f3, %r10;
 ; CHECK-NEXT:    add.f32 %f4, %f3, 0f40E00000;
 ; CHECK-NEXT:    cvt.rn.bf16.f32 %rs6, %f4;
 ; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r6;
-; CHECK-NEXT:    cvt.u32.u16 %r11, %rs8;
+; CHECK-NEXT:    cvt.u32.u16 %r11, %rs7;
 ; CHECK-NEXT:    shl.b32 %r12, %r11, 16;
 ; CHECK-NEXT:    mov.b32 %f5, %r12;
 ; CHECK-NEXT:    cvt.u32.u16 %r13, %rs6;
 ; CHECK-NEXT:    shl.b32 %r14, %r13, 16;
 ; CHECK-NEXT:    mov.b32 %f6, %r14;
 ; CHECK-NEXT:    add.f32 %f7, %f5, %f6;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs11, %f7;
-; CHECK-NEXT:    cvt.u32.u16 %r15, %rs7;
+; CHECK-NEXT:    cvt.u32.u16 %r15, %rs8;
 ; CHECK-NEXT:    shl.b32 %r16, %r15, 16;
 ; CHECK-NEXT:    mov.b32 %f8, %r16;
 ; CHECK-NEXT:    cvt.u32.u16 %r17, %rs4;
 ; CHECK-NEXT:    shl.b32 %r18, %r17, 16;
 ; CHECK-NEXT:    mov.b32 %f9, %r18;
 ; CHECK-NEXT:    add.f32 %f10, %f8, %f9;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs14, %f10;
-; CHECK-NEXT:    mov.b32 %r19, {%rs14, %rs11};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r19, %f10, %f7;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r19;
 ; CHECK-NEXT:    ret;
 ;
 ; CHECK-FTZ-LABEL: fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(
 ; CHECK-FTZ:       {
-; CHECK-FTZ-NEXT:    .reg .b16 %rs<15>;
+; CHECK-FTZ-NEXT:    .reg .b16 %rs<13>;
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<20>;
 ; CHECK-FTZ-NEXT:    .reg .f32 %f<11>;
 ; CHECK-FTZ-EMPTY:
@@ -1016,34 +1014,32 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 0;
 ; CHECK-FTZ-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r7, %rs1;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r7, %rs2;
 ; CHECK-FTZ-NEXT:    shl.b32 %r8, %r7, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f1, %r8;
 ; CHECK-FTZ-NEXT:    add.ftz.f32 %f2, %f1, 0f40E00000;
 ; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs4, %f2;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r9, %rs2;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r9, %rs1;
 ; CHECK-FTZ-NEXT:    shl.b32 %r10, %r9, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f3, %r10;
 ; CHECK-FTZ-NEXT:    add.ftz.f32 %f4, %f3, 0f40E00000;
 ; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs6, %f4;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs7, %rs8}, %r6;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r11, %rs8;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r11, %rs7;
 ; CHECK-FTZ-NEXT:    shl.b32 %r12, %r11, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f5, %r12;
 ; CHECK-FTZ-NEXT:    cvt.u32.u16 %r13, %rs6;
 ; CHECK-FTZ-NEXT:    shl.b32 %r14, %r13, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f6, %r14;
 ; CHECK-FTZ-NEXT:    add.ftz.f32 %f7, %f5, %f6;
-; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs11, %f7;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r15, %rs7;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r15, %rs8;
 ; CHECK-FTZ-NEXT:    shl.b32 %r16, %r15, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f8, %r16;
 ; CHECK-FTZ-NEXT:    cvt.u32.u16 %r17, %rs4;
 ; CHECK-FTZ-NEXT:    shl.b32 %r18, %r17, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f9, %r18;
 ; CHECK-FTZ-NEXT:    add.ftz.f32 %f10, %f8, %f9;
-; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs14, %f10;
-; CHECK-FTZ-NEXT:    mov.b32 %r19, {%rs14, %rs11};
+; CHECK-FTZ-NEXT:    cvt.rn.bf16x2.f32 %r19, %f10, %f7;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r19;
 ; CHECK-FTZ-NEXT:    ret;
 ;

diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll b/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
index 16219aa9da0950..4d61ce1478953a 100644
--- a/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-relu-fma-intrinsic.ll
@@ -642,7 +642,7 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
 define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c) #0 {
 ; CHECK-LABEL: fma_bf16x2_no_nans_multiple_uses_of_fma(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b16 %rs<11>;
+; CHECK-NEXT:    .reg .b16 %rs<9>;
 ; CHECK-NEXT:    .reg .b32 %r<14>;
 ; CHECK-NEXT:    .reg .f32 %f<9>;
 ; CHECK-EMPTY:
@@ -652,12 +652,12 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; CHECK-NEXT:    cvt.u32.u16 %r5, %rs1;
+; CHECK-NEXT:    cvt.u32.u16 %r5, %rs2;
 ; CHECK-NEXT:    shl.b32 %r6, %r5, 16;
 ; CHECK-NEXT:    mov.b32 %f1, %r6;
 ; CHECK-NEXT:    add.f32 %f2, %f1, 0f40E00000;
 ; CHECK-NEXT:    cvt.rn.bf16.f32 %rs4, %f2;
-; CHECK-NEXT:    cvt.u32.u16 %r7, %rs2;
+; CHECK-NEXT:    cvt.u32.u16 %r7, %rs1;
 ; CHECK-NEXT:    shl.b32 %r8, %r7, 16;
 ; CHECK-NEXT:    mov.b32 %f3, %r8;
 ; CHECK-NEXT:    add.f32 %f4, %f3, 0f40E00000;
@@ -666,19 +666,17 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-NEXT:    shl.b32 %r10, %r9, 16;
 ; CHECK-NEXT:    mov.b32 %f5, %r10;
 ; CHECK-NEXT:    add.f32 %f6, %f5, %f3;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs8, %f6;
 ; CHECK-NEXT:    cvt.u32.u16 %r11, %rs4;
 ; CHECK-NEXT:    shl.b32 %r12, %r11, 16;
 ; CHECK-NEXT:    mov.b32 %f7, %r12;
 ; CHECK-NEXT:    add.f32 %f8, %f7, %f1;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs10, %f8;
-; CHECK-NEXT:    mov.b32 %r13, {%rs10, %rs8};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r13, %f8, %f6;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r13;
 ; CHECK-NEXT:    ret;
 ;
 ; CHECK-FTZ-LABEL: fma_bf16x2_no_nans_multiple_uses_of_fma(
 ; CHECK-FTZ:       {
-; CHECK-FTZ-NEXT:    .reg .b16 %rs<11>;
+; CHECK-FTZ-NEXT:    .reg .b16 %rs<9>;
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<14>;
 ; CHECK-FTZ-NEXT:    .reg .f32 %f<9>;
 ; CHECK-FTZ-EMPTY:
@@ -688,12 +686,12 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r5, %rs1;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r5, %rs2;
 ; CHECK-FTZ-NEXT:    shl.b32 %r6, %r5, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f1, %r6;
 ; CHECK-FTZ-NEXT:    add.ftz.f32 %f2, %f1, 0f40E00000;
 ; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs4, %f2;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r7, %rs2;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r7, %rs1;
 ; CHECK-FTZ-NEXT:    shl.b32 %r8, %r7, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f3, %r8;
 ; CHECK-FTZ-NEXT:    add.ftz.f32 %f4, %f3, 0f40E00000;
@@ -702,13 +700,11 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-FTZ-NEXT:    shl.b32 %r10, %r9, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f5, %r10;
 ; CHECK-FTZ-NEXT:    add.ftz.f32 %f6, %f5, %f3;
-; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs8, %f6;
 ; CHECK-FTZ-NEXT:    cvt.u32.u16 %r11, %rs4;
 ; CHECK-FTZ-NEXT:    shl.b32 %r12, %r11, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f7, %r12;
 ; CHECK-FTZ-NEXT:    add.ftz.f32 %f8, %f7, %f1;
-; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs10, %f8;
-; CHECK-FTZ-NEXT:    mov.b32 %r13, {%rs10, %rs8};
+; CHECK-FTZ-NEXT:    cvt.rn.bf16x2.f32 %r13, %f8, %f6;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r13;
 ; CHECK-FTZ-NEXT:    ret;
 ;

diff --git a/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll b/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll
index af21bada7783be..043c0fcca63710 100644
--- a/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll
+++ b/llvm/test/CodeGen/NVPTX/fma-relu-instruction-flag.ll
@@ -694,7 +694,7 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans(<2 x bfloat> %a, <2 x bfloat> %
 define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)  {
 ; CHECK-LABEL: fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b16 %rs<15>;
+; CHECK-NEXT:    .reg .b16 %rs<13>;
 ; CHECK-NEXT:    .reg .b32 %r<20>;
 ; CHECK-NEXT:    .reg .f32 %f<11>;
 ; CHECK-EMPTY:
@@ -706,40 +706,38 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
 ; CHECK-NEXT:    mov.b32 %r5, 0;
 ; CHECK-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; CHECK-NEXT:    cvt.u32.u16 %r7, %rs1;
+; CHECK-NEXT:    cvt.u32.u16 %r7, %rs2;
 ; CHECK-NEXT:    shl.b32 %r8, %r7, 16;
 ; CHECK-NEXT:    mov.b32 %f1, %r8;
 ; CHECK-NEXT:    add.rn.f32 %f2, %f1, 0f40E00000;
 ; CHECK-NEXT:    cvt.rn.bf16.f32 %rs4, %f2;
-; CHECK-NEXT:    cvt.u32.u16 %r9, %rs2;
+; CHECK-NEXT:    cvt.u32.u16 %r9, %rs1;
 ; CHECK-NEXT:    shl.b32 %r10, %r9, 16;
 ; CHECK-NEXT:    mov.b32 %f3, %r10;
 ; CHECK-NEXT:    add.rn.f32 %f4, %f3, 0f40E00000;
 ; CHECK-NEXT:    cvt.rn.bf16.f32 %rs6, %f4;
 ; CHECK-NEXT:    mov.b32 {%rs7, %rs8}, %r6;
-; CHECK-NEXT:    cvt.u32.u16 %r11, %rs8;
+; CHECK-NEXT:    cvt.u32.u16 %r11, %rs7;
 ; CHECK-NEXT:    shl.b32 %r12, %r11, 16;
 ; CHECK-NEXT:    mov.b32 %f5, %r12;
 ; CHECK-NEXT:    cvt.u32.u16 %r13, %rs6;
 ; CHECK-NEXT:    shl.b32 %r14, %r13, 16;
 ; CHECK-NEXT:    mov.b32 %f6, %r14;
 ; CHECK-NEXT:    add.rn.f32 %f7, %f5, %f6;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs11, %f7;
-; CHECK-NEXT:    cvt.u32.u16 %r15, %rs7;
+; CHECK-NEXT:    cvt.u32.u16 %r15, %rs8;
 ; CHECK-NEXT:    shl.b32 %r16, %r15, 16;
 ; CHECK-NEXT:    mov.b32 %f8, %r16;
 ; CHECK-NEXT:    cvt.u32.u16 %r17, %rs4;
 ; CHECK-NEXT:    shl.b32 %r18, %r17, 16;
 ; CHECK-NEXT:    mov.b32 %f9, %r18;
 ; CHECK-NEXT:    add.rn.f32 %f10, %f8, %f9;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs14, %f10;
-; CHECK-NEXT:    mov.b32 %r19, {%rs14, %rs11};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r19, %f10, %f7;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r19;
 ; CHECK-NEXT:    ret;
 ;
 ; CHECK-FTZ-LABEL: fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(
 ; CHECK-FTZ:       {
-; CHECK-FTZ-NEXT:    .reg .b16 %rs<15>;
+; CHECK-FTZ-NEXT:    .reg .b16 %rs<13>;
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<20>;
 ; CHECK-FTZ-NEXT:    .reg .f32 %f<11>;
 ; CHECK-FTZ-EMPTY:
@@ -751,34 +749,32 @@ define <2 x bfloat> @fma_bf16x2_expanded_no_nans_multiple_uses_of_fma(<2 x bfloa
 ; CHECK-FTZ-NEXT:    mov.b32 %r5, 0;
 ; CHECK-FTZ-NEXT:    max.bf16x2 %r6, %r4, %r5;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r7, %rs1;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r7, %rs2;
 ; CHECK-FTZ-NEXT:    shl.b32 %r8, %r7, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f1, %r8;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f32 %f2, %f1, 0f40E00000;
 ; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs4, %f2;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r9, %rs2;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r9, %rs1;
 ; CHECK-FTZ-NEXT:    shl.b32 %r10, %r9, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f3, %r10;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f32 %f4, %f3, 0f40E00000;
 ; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs6, %f4;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs7, %rs8}, %r6;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r11, %rs8;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r11, %rs7;
 ; CHECK-FTZ-NEXT:    shl.b32 %r12, %r11, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f5, %r12;
 ; CHECK-FTZ-NEXT:    cvt.u32.u16 %r13, %rs6;
 ; CHECK-FTZ-NEXT:    shl.b32 %r14, %r13, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f6, %r14;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f32 %f7, %f5, %f6;
-; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs11, %f7;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r15, %rs7;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r15, %rs8;
 ; CHECK-FTZ-NEXT:    shl.b32 %r16, %r15, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f8, %r16;
 ; CHECK-FTZ-NEXT:    cvt.u32.u16 %r17, %rs4;
 ; CHECK-FTZ-NEXT:    shl.b32 %r18, %r17, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f9, %r18;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f32 %f10, %f8, %f9;
-; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs14, %f10;
-; CHECK-FTZ-NEXT:    mov.b32 %r19, {%rs14, %rs11};
+; CHECK-FTZ-NEXT:    cvt.rn.bf16x2.f32 %r19, %f10, %f7;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r19;
 ; CHECK-FTZ-NEXT:    ret;
 ;
@@ -1637,7 +1633,7 @@ define <2 x bfloat> @fma_bf16x2_no_nans(<2 x bfloat> %a, <2 x bfloat> %b, <2 x b
 define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2 x bfloat> %b, <2 x bfloat> %c)  {
 ; CHECK-LABEL: fma_bf16x2_no_nans_multiple_uses_of_fma(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b16 %rs<11>;
+; CHECK-NEXT:    .reg .b16 %rs<9>;
 ; CHECK-NEXT:    .reg .b32 %r<14>;
 ; CHECK-NEXT:    .reg .f32 %f<9>;
 ; CHECK-EMPTY:
@@ -1647,12 +1643,12 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
 ; CHECK-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; CHECK-NEXT:    cvt.u32.u16 %r5, %rs1;
+; CHECK-NEXT:    cvt.u32.u16 %r5, %rs2;
 ; CHECK-NEXT:    shl.b32 %r6, %r5, 16;
 ; CHECK-NEXT:    mov.b32 %f1, %r6;
 ; CHECK-NEXT:    add.rn.f32 %f2, %f1, 0f40E00000;
 ; CHECK-NEXT:    cvt.rn.bf16.f32 %rs4, %f2;
-; CHECK-NEXT:    cvt.u32.u16 %r7, %rs2;
+; CHECK-NEXT:    cvt.u32.u16 %r7, %rs1;
 ; CHECK-NEXT:    shl.b32 %r8, %r7, 16;
 ; CHECK-NEXT:    mov.b32 %f3, %r8;
 ; CHECK-NEXT:    add.rn.f32 %f4, %f3, 0f40E00000;
@@ -1661,19 +1657,17 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-NEXT:    shl.b32 %r10, %r9, 16;
 ; CHECK-NEXT:    mov.b32 %f5, %r10;
 ; CHECK-NEXT:    add.rn.f32 %f6, %f5, %f3;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs8, %f6;
 ; CHECK-NEXT:    cvt.u32.u16 %r11, %rs4;
 ; CHECK-NEXT:    shl.b32 %r12, %r11, 16;
 ; CHECK-NEXT:    mov.b32 %f7, %r12;
 ; CHECK-NEXT:    add.rn.f32 %f8, %f7, %f1;
-; CHECK-NEXT:    cvt.rn.bf16.f32 %rs10, %f8;
-; CHECK-NEXT:    mov.b32 %r13, {%rs10, %rs8};
+; CHECK-NEXT:    cvt.rn.bf16x2.f32 %r13, %f8, %f6;
 ; CHECK-NEXT:    st.param.b32 [func_retval0], %r13;
 ; CHECK-NEXT:    ret;
 ;
 ; CHECK-FTZ-LABEL: fma_bf16x2_no_nans_multiple_uses_of_fma(
 ; CHECK-FTZ:       {
-; CHECK-FTZ-NEXT:    .reg .b16 %rs<11>;
+; CHECK-FTZ-NEXT:    .reg .b16 %rs<9>;
 ; CHECK-FTZ-NEXT:    .reg .b32 %r<14>;
 ; CHECK-FTZ-NEXT:    .reg .f32 %f<9>;
 ; CHECK-FTZ-EMPTY:
@@ -1683,12 +1677,12 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-FTZ-NEXT:    ld.param.b32 %r3, [fma_bf16x2_no_nans_multiple_uses_of_fma_param_0];
 ; CHECK-FTZ-NEXT:    fma.rn.bf16x2 %r4, %r3, %r2, %r1;
 ; CHECK-FTZ-NEXT:    mov.b32 {%rs1, %rs2}, %r4;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r5, %rs1;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r5, %rs2;
 ; CHECK-FTZ-NEXT:    shl.b32 %r6, %r5, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f1, %r6;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f32 %f2, %f1, 0f40E00000;
 ; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs4, %f2;
-; CHECK-FTZ-NEXT:    cvt.u32.u16 %r7, %rs2;
+; CHECK-FTZ-NEXT:    cvt.u32.u16 %r7, %rs1;
 ; CHECK-FTZ-NEXT:    shl.b32 %r8, %r7, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f3, %r8;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f32 %f4, %f3, 0f40E00000;
@@ -1697,13 +1691,11 @@ define <2 x bfloat> @fma_bf16x2_no_nans_multiple_uses_of_fma(<2 x bfloat> %a, <2
 ; CHECK-FTZ-NEXT:    shl.b32 %r10, %r9, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f5, %r10;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f32 %f6, %f5, %f3;
-; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs8, %f6;
 ; CHECK-FTZ-NEXT:    cvt.u32.u16 %r11, %rs4;
 ; CHECK-FTZ-NEXT:    shl.b32 %r12, %r11, 16;
 ; CHECK-FTZ-NEXT:    mov.b32 %f7, %r12;
 ; CHECK-FTZ-NEXT:    add.rn.ftz.f32 %f8, %f7, %f1;
-; CHECK-FTZ-NEXT:    cvt.rn.bf16.f32 %rs10, %f8;
-; CHECK-FTZ-NEXT:    mov.b32 %r13, {%rs10, %rs8};
+; CHECK-FTZ-NEXT:    cvt.rn.bf16x2.f32 %r13, %f8, %f6;
 ; CHECK-FTZ-NEXT:    st.param.b32 [func_retval0], %r13;
 ; CHECK-FTZ-NEXT:    ret;
 ;


        


More information about the llvm-commits mailing list