[llvm] [AMDGPU] Form V_MAD_U64_U32 from mul24/mulhi24 (PR #72393)

Pierre van Houtryve via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 16 00:31:03 PST 2023


https://github.com/Pierre-vh updated https://github.com/llvm/llvm-project/pull/72393

From a2a1412deba4989a7d4edd3f3f7fdece38917430 Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Wed, 15 Nov 2023 15:20:32 +0100
Subject: [PATCH 1/2] [AMDGPU] Form V_MAD_U64_U32 from mul24/mulhi24

AMDGPUCodeGenPrepare (amdgpu-codegenprepare-mul24) can rewrite a mul+add that would otherwise be selected as V_MAD_U64_U32 into a (mul24/mulhi24)+add, so add a pattern for that form as well.

See SWDEV-421067
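
For reference, this is the IR shape being matched (taken verbatim from the
new test below):

    %mul = call i32 @llvm.amdgcn.mul.u24(i32 %x, i32 %y)
    %mulhi = call i32 @llvm.amdgcn.mulhi.u24(i32 %x, i32 %y)
    %mul.zext = zext i32 %mul to i64
    %mulhi.zext = zext i32 %mulhi to i64
    %mulhi.shift = shl i64 %mulhi.zext, 32
    %mul.mulhi = or i64 %mulhi.shift, %mul.zext
    %add = add nuw nsw i64 %mul.mulhi, %z

During selection the zext/shl/or combination of the two 24-bit products
becomes a (bitconvert (build_vector lo, hi)), which is the form the new
GCNPat matches and selects to a single v_mad_u64_u32.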
---
 llvm/lib/Target/AMDGPU/VOP3Instructions.td    |  9 +++
 .../CodeGen/AMDGPU/integer-mad-patterns.ll    | 63 +++++++++++++++++++
 2 files changed, 72 insertions(+)

diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 114d33b077866a1..06856b03a508124 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -676,6 +676,15 @@ multiclass IMAD32_Pats <VOP3_Pseudo inst> {
         (ThreeOpFragSDAG<mul, add> i32:$src0, i32:$src1, (i32 imm:$src2)),
         (EXTRACT_SUBREG (inst $src0, $src1, (i64 (as_i64imm $src2)), 0 /* clamp */), sub0)
         >;
+
+  // Handle cases where amdgpu-codegenprepare-mul24 made a mul24 instead of a normal mul.
+  def : GCNPat <
+      (i64 (add (bitconvert (v2i32 (build_vector
+                                     (AMDGPUmul_u24 i32:$src0, i32:$src1),
+                                     (AMDGPUmulhi_u24 i32:$src0, i32:$src1)))),
+                i64:$src2)),
+      (inst $src0, $src1, $src2, 0 /* clamp */)
+      >;
 }
 
 // exclude pre-GFX9 where it was slow
diff --git a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
index 61017e809c86365..56b6fef1b82255c 100644
--- a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
@@ -6928,6 +6928,69 @@ entry:
   ret <2 x i16> %add0
 }
 
+define i64 @mul_mulhi_u24(i32 %x, i32 %y, i64 %z) {
+; GFX67-LABEL: mul_mulhi_u24:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mul_u32_u24_e32 v4, v0, v1
+; GFX67-NEXT:    v_mul_hi_u32_u24_e32 v1, v0, v1
+; GFX67-NEXT:    v_add_i32_e32 v0, vcc, v4, v2
+; GFX67-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: mul_mulhi_u24:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_u32_u24_e32 v4, v0, v1
+; GFX8-NEXT:    v_mul_hi_u32_u24_e32 v1, v0, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v4, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: mul_mulhi_u24:
+; GFX9-SDAG:       ; %bb.0:
+; GFX9-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v1, v[2:3]
+; GFX9-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-GISEL-LABEL: mul_mulhi_u24:
+; GFX9-GISEL:       ; %bb.0:
+; GFX9-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT:    v_mul_u32_u24_e32 v4, v0, v1
+; GFX9-GISEL-NEXT:    v_mul_hi_u32_u24_e32 v1, v0, v1
+; GFX9-GISEL-NEXT:    v_add_co_u32_e32 v0, vcc, v4, v2
+; GFX9-GISEL-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: mul_mulhi_u24:
+; GFX10-SDAG:       ; %bb.0:
+; GFX10-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT:    v_mad_u64_u32 v[0:1], null, v0, v1, v[2:3]
+; GFX10-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: mul_mulhi_u24:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    v_mul_u32_u24_e32 v4, v0, v1
+; GFX10-GISEL-NEXT:    v_mul_hi_u32_u24_e32 v1, v0, v1
+; GFX10-GISEL-NEXT:    v_add_co_u32 v0, vcc_lo, v4, v2
+; GFX10-GISEL-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %mul = call i32 @llvm.amdgcn.mul.u24(i32 %x, i32 %y)
+  %mulhi = call i32 @llvm.amdgcn.mulhi.u24(i32 %x, i32 %y)
+  %mul.zext = zext i32 %mul to i64
+  %mulhi.zext = zext i32 %mulhi to i64
+  %mulhi.shift = shl i64 %mulhi.zext, 32
+  %mul.mulhi = or i64 %mulhi.shift, %mul.zext
+  %add = add nuw nsw i64 %mul.mulhi, %z
+  ret i64 %add
+}
+
+declare i32 @llvm.amdgcn.mul.u24(i32, i32)
+declare i32 @llvm.amdgcn.mulhi.u24(i32, i32)
+declare i32 @llvm.amdgcn.mul.i24(i32, i32)
+declare i32 @llvm.amdgcn.mulhi.i24(i32, i32)
+
 ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
 ; GFX6: {{.*}}
 ; GFX7: {{.*}}

From 1f341bd739727499e9899cfcfa22b4e7734c6fad Mon Sep 17 00:00:00 2001
From: pvanhout <pierre.vanhoutryve at amd.com>
Date: Thu, 16 Nov 2023 09:30:48 +0100
Subject: [PATCH 2/2] Add mul24-only pattern

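The mul24 transform can also leave a single mul24 whose 64-bit zext feeds
an add, with no accompanying mulhi24. Cover that with a
(add (zext (AMDGPUmul_u24))) pattern, exercised by the new mul_u24_add64
test:

    %mul = call i32 @llvm.amdgcn.mul.u24(i32 %x, i32 %y)
    %mul.zext = zext i32 %mul to i64
    %add = add nuw nsw i64 %mul.zext, %z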
---
 llvm/lib/Target/AMDGPU/VOP3Instructions.td    |  4 ++
 .../CodeGen/AMDGPU/integer-mad-patterns.ll    | 64 +++++++++++++++--
 ...ne-sink-temporal-divergence-swdev407790.ll | 68 +++++++++----------
 3 files changed, 93 insertions(+), 43 deletions(-)

diff --git a/llvm/lib/Target/AMDGPU/VOP3Instructions.td b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
index 06856b03a508124..5a5c6d3465482b0 100644
--- a/llvm/lib/Target/AMDGPU/VOP3Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP3Instructions.td
@@ -678,6 +678,10 @@ multiclass IMAD32_Pats <VOP3_Pseudo inst> {
         >;
 
   // Handle cases where amdgpu-codegenprepare-mul24 made a mul24 instead of a normal mul.
+  def : GCNPat <
+      (i64 (add (i64 (zext (AMDGPUmul_u24 i32:$src0, i32:$src1))), i64:$src2)),
+      (inst $src0, $src1, $src2, 0 /* clamp */)
+      >;
   def : GCNPat <
       (i64 (add (bitconvert (v2i32 (build_vector
                                      (AMDGPUmul_u24 i32:$src0, i32:$src1),
diff --git a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
index 56b6fef1b82255c..9a3e815f14c98fb 100644
--- a/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
+++ b/llvm/test/CodeGen/AMDGPU/integer-mad-patterns.ll
@@ -6928,8 +6928,8 @@ entry:
   ret <2 x i16> %add0
 }
 
-define i64 @mul_mulhi_u24(i32 %x, i32 %y, i64 %z) {
-; GFX67-LABEL: mul_mulhi_u24:
+define i64 @mul_mulhi_u24_add64(i32 %x, i32 %y, i64 %z) {
+; GFX67-LABEL: mul_mulhi_u24_add64:
 ; GFX67:       ; %bb.0:
 ; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX67-NEXT:    v_mul_u32_u24_e32 v4, v0, v1
@@ -6938,7 +6938,7 @@ define i64 @mul_mulhi_u24(i32 %x, i32 %y, i64 %z) {
 ; GFX67-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
 ; GFX67-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX8-LABEL: mul_mulhi_u24:
+; GFX8-LABEL: mul_mulhi_u24_add64:
 ; GFX8:       ; %bb.0:
 ; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX8-NEXT:    v_mul_u32_u24_e32 v4, v0, v1
@@ -6947,13 +6947,13 @@ define i64 @mul_mulhi_u24(i32 %x, i32 %y, i64 %z) {
 ; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, v1, v3, vcc
 ; GFX8-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX9-SDAG-LABEL: mul_mulhi_u24:
+; GFX9-SDAG-LABEL: mul_mulhi_u24_add64:
 ; GFX9-SDAG:       ; %bb.0:
 ; GFX9-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v1, v[2:3]
 ; GFX9-SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX9-GISEL-LABEL: mul_mulhi_u24:
+; GFX9-GISEL-LABEL: mul_mulhi_u24_add64:
 ; GFX9-GISEL:       ; %bb.0:
 ; GFX9-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX9-GISEL-NEXT:    v_mul_u32_u24_e32 v4, v0, v1
@@ -6962,13 +6962,13 @@ define i64 @mul_mulhi_u24(i32 %x, i32 %y, i64 %z) {
 ; GFX9-GISEL-NEXT:    v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
 ; GFX9-GISEL-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-SDAG-LABEL: mul_mulhi_u24:
+; GFX10-SDAG-LABEL: mul_mulhi_u24_add64:
 ; GFX10-SDAG:       ; %bb.0:
 ; GFX10-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-SDAG-NEXT:    v_mad_u64_u32 v[0:1], null, v0, v1, v[2:3]
 ; GFX10-SDAG-NEXT:    s_setpc_b64 s[30:31]
 ;
-; GFX10-GISEL-LABEL: mul_mulhi_u24:
+; GFX10-GISEL-LABEL: mul_mulhi_u24_add64:
 ; GFX10-GISEL:       ; %bb.0:
 ; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; GFX10-GISEL-NEXT:    v_mul_u32_u24_e32 v4, v0, v1
@@ -6986,6 +6986,56 @@ define i64 @mul_mulhi_u24(i32 %x, i32 %y, i64 %z) {
   ret i64 %add
 }
 
+define i64 @mul_u24_add64(i32 %x, i32 %y, i64 %z) {
+; GFX67-LABEL: mul_u24_add64:
+; GFX67:       ; %bb.0:
+; GFX67-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX67-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX67-NEXT:    v_add_i32_e32 v0, vcc, v0, v2
+; GFX67-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX67-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: mul_u24_add64:
+; GFX8:       ; %bb.0:
+; GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX8-NEXT:    v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT:    v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; GFX8-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-SDAG-LABEL: mul_u24_add64:
+; GFX9-SDAG:       ; %bb.0:
+; GFX9-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-SDAG-NEXT:    v_mad_u64_u32 v[0:1], s[4:5], v0, v1, v[2:3]
+; GFX9-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX9-GISEL-LABEL: mul_u24_add64:
+; GFX9-GISEL:       ; %bb.0:
+; GFX9-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-GISEL-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX9-GISEL-NEXT:    v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-GISEL-NEXT:    v_addc_co_u32_e32 v1, vcc, 0, v3, vcc
+; GFX9-GISEL-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-SDAG-LABEL: mul_u24_add64:
+; GFX10-SDAG:       ; %bb.0:
+; GFX10-SDAG-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-SDAG-NEXT:    v_mad_u64_u32 v[0:1], null, v0, v1, v[2:3]
+; GFX10-SDAG-NEXT:    s_setpc_b64 s[30:31]
+;
+; GFX10-GISEL-LABEL: mul_u24_add64:
+; GFX10-GISEL:       ; %bb.0:
+; GFX10-GISEL-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-GISEL-NEXT:    v_mul_u32_u24_e32 v0, v0, v1
+; GFX10-GISEL-NEXT:    v_add_co_u32 v0, vcc_lo, v0, v2
+; GFX10-GISEL-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
+; GFX10-GISEL-NEXT:    s_setpc_b64 s[30:31]
+  %mul = call i32 @llvm.amdgcn.mul.u24(i32 %x, i32 %y)
+  %mul.zext = zext i32 %mul to i64
+  %add = add nuw nsw i64 %mul.zext, %z
+  ret i64 %add
+}
+
 declare i32 @llvm.amdgcn.mul.u24(i32, i32)
 declare i32 @llvm.amdgcn.mulhi.u24(i32, i32)
 declare i32 @llvm.amdgcn.mul.i24(i32, i32)
diff --git a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
index 75f3b5463c3944b..46089960db28186 100644
--- a/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
+++ b/llvm/test/CodeGen/AMDGPU/machine-sink-temporal-divergence-swdev407790.ll
@@ -347,18 +347,18 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_cmpx_gt_u32_e64 v47, v40
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_33
 ; CHECK-NEXT:  ; %bb.26:
-; CHECK-NEXT:    s_add_u32 s52, s44, 8
-; CHECK-NEXT:    s_addc_u32 s53, s45, 0
-; CHECK-NEXT:    s_getpc_b64 s[42:43]
-; CHECK-NEXT:    s_add_u32 s42, s42, _Z10atomic_addPU3AS1Vjj@rel32@lo+4
-; CHECK-NEXT:    s_addc_u32 s43, s43, _Z10atomic_addPU3AS1Vjj@rel32@hi+12
-; CHECK-NEXT:    s_mov_b32 s54, 0
+; CHECK-NEXT:    s_add_u32 s42, s44, 8
+; CHECK-NEXT:    s_addc_u32 s43, s45, 0
 ; CHECK-NEXT:    s_getpc_b64 s[44:45]
-; CHECK-NEXT:    s_add_u32 s44, s44, _Z10atomic_subPU3AS1Vjj@rel32@lo+4
-; CHECK-NEXT:    s_addc_u32 s45, s45, _Z10atomic_subPU3AS1Vjj@rel32@hi+12
+; CHECK-NEXT:    s_add_u32 s44, s44, _Z10atomic_addPU3AS1Vjj@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s45, s45, _Z10atomic_addPU3AS1Vjj@rel32@hi+12
+; CHECK-NEXT:    s_mov_b32 s54, 0
 ; CHECK-NEXT:    s_getpc_b64 s[48:49]
-; CHECK-NEXT:    s_add_u32 s48, s48, _Z14get_local_sizej@rel32@lo+4
-; CHECK-NEXT:    s_addc_u32 s49, s49, _Z14get_local_sizej@rel32@hi+12
+; CHECK-NEXT:    s_add_u32 s48, s48, _Z10atomic_subPU3AS1Vjj@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s49, s49, _Z10atomic_subPU3AS1Vjj@rel32@hi+12
+; CHECK-NEXT:    s_getpc_b64 s[52:53]
+; CHECK-NEXT:    s_add_u32 s52, s52, _Z14get_local_sizej@rel32@lo+4
+; CHECK-NEXT:    s_addc_u32 s53, s53, _Z14get_local_sizej@rel32@hi+12
 ; CHECK-NEXT:    s_branch .LBB0_28
 ; CHECK-NEXT:  .LBB0_27: ; in Loop: Header=BB0_28 Depth=1
 ; CHECK-NEXT:    s_or_b32 exec_lo, exec_lo, s55
@@ -371,7 +371,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_mov_b32 s12, s41
 ; CHECK-NEXT:    s_mov_b32 s13, s40
 ; CHECK-NEXT:    s_mov_b32 s14, s33
-; CHECK-NEXT:    s_swappc_b64 s[30:31], s[48:49]
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[52:53]
 ; CHECK-NEXT:    v_add_co_u32 v40, vcc_lo, v0, v40
 ; CHECK-NEXT:    v_cmp_le_u32_e32 vcc_lo, v47, v40
 ; CHECK-NEXT:    s_or_b32 s54, vcc_lo, s54
@@ -385,11 +385,9 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_lshrrev_b32_e32 v63, 10, v0
 ; CHECK-NEXT:    v_bfe_u32 v62, v0, 5, 5
 ; CHECK-NEXT:    v_and_b32_e32 v72, 31, v0
-; CHECK-NEXT:    v_mul_u32_u24_e32 v1, 0x180, v63
+; CHECK-NEXT:    v_mad_u64_u32 v[2:3], null, 0x180, v63, s[42:43]
 ; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 5, v62
 ; CHECK-NEXT:    v_lshlrev_b32_e32 v4, 5, v72
-; CHECK-NEXT:    v_add_co_u32 v2, s4, s52, v1
-; CHECK-NEXT:    v_add_co_ci_u32_e64 v3, null, s53, 0, s4
 ; CHECK-NEXT:    v_add_co_u32 v0, vcc_lo, v2, v0
 ; CHECK-NEXT:    v_add_co_ci_u32_e32 v1, vcc_lo, 0, v3, vcc_lo
 ; CHECK-NEXT:    v_add_co_u32 v2, vcc_lo, v2, v4
@@ -437,37 +435,35 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    v_mov_b32_e32 v0, v42
 ; CHECK-NEXT:    s_mov_b64 s[4:5], s[38:39]
 ; CHECK-NEXT:    v_mov_b32_e32 v1, v43
-; CHECK-NEXT:    s_swappc_b64 s[30:31], s[42:43]
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[44:45]
 ; CHECK-NEXT:    v_bfe_u32 v0, v0, v74, 4
 ; CHECK-NEXT:    s_mov_b32 s4, exec_lo
 ; CHECK-NEXT:    v_cmpx_gt_u32_e32 12, v0
 ; CHECK-NEXT:    s_xor_b32 s4, exec_lo, s4
 ; CHECK-NEXT:    s_cbranch_execz .LBB0_31
 ; CHECK-NEXT:  ; %bb.30: ; in Loop: Header=BB0_28 Depth=1
-; CHECK-NEXT:    v_xor_b32_e32 v5, v60, v58
-; CHECK-NEXT:    v_lshrrev_b64 v[3:4], 16, v[56:57]
-; CHECK-NEXT:    v_mul_u32_u24_e32 v11, 0x180, v73
-; CHECK-NEXT:    v_lshlrev_b32_e32 v0, 5, v0
-; CHECK-NEXT:    v_lshrrev_b64 v[1:2], 16, v[45:46]
-; CHECK-NEXT:    v_lshlrev_b32_e32 v7, 16, v5
+; CHECK-NEXT:    v_xor_b32_e32 v4, v60, v58
+; CHECK-NEXT:    v_lshrrev_b64 v[2:3], 16, v[56:57]
+; CHECK-NEXT:    v_mad_u64_u32 v[6:7], null, 0x180, v73, s[46:47]
+; CHECK-NEXT:    v_lshlrev_b32_e32 v10, 5, v0
+; CHECK-NEXT:    v_lshlrev_b32_e32 v1, 16, v4
 ; CHECK-NEXT:    v_lshlrev_b32_e32 v8, 6, v72
-; CHECK-NEXT:    v_lshlrev_b32_e32 v10, 12, v63
-; CHECK-NEXT:    v_xor_b32_e32 v6, v61, v59
-; CHECK-NEXT:    v_lshlrev_b32_e32 v9, 16, v56
-; CHECK-NEXT:    v_or_b32_e32 v4, v7, v4
-; CHECK-NEXT:    v_add_co_u32 v7, s5, s46, v11
-; CHECK-NEXT:    v_add_co_ci_u32_e64 v11, null, s47, 0, s5
-; CHECK-NEXT:    v_or3_b32 v10, v8, v10, v62
-; CHECK-NEXT:    v_add_co_u32 v7, vcc_lo, v7, v0
-; CHECK-NEXT:    v_add_co_ci_u32_e32 v8, vcc_lo, 0, v11, vcc_lo
-; CHECK-NEXT:    v_lshrrev_b64 v[5:6], 16, v[5:6]
-; CHECK-NEXT:    v_or_b32_e32 v2, v9, v2
-; CHECK-NEXT:    global_store_dword v[7:8], v10, off offset:4
-; CHECK-NEXT:    global_store_dwordx4 v[7:8], v[1:4], off offset:8
-; CHECK-NEXT:    global_store_dwordx2 v[7:8], v[5:6], off offset:24
+; CHECK-NEXT:    v_lshlrev_b32_e32 v9, 12, v63
+; CHECK-NEXT:    v_xor_b32_e32 v5, v61, v59
+; CHECK-NEXT:    v_lshlrev_b32_e32 v11, 16, v56
+; CHECK-NEXT:    v_or_b32_e32 v3, v1, v3
+; CHECK-NEXT:    v_lshrrev_b64 v[0:1], 16, v[45:46]
+; CHECK-NEXT:    v_add_co_u32 v6, vcc_lo, v6, v10
+; CHECK-NEXT:    v_or3_b32 v8, v8, v9, v62
+; CHECK-NEXT:    v_add_co_ci_u32_e32 v7, vcc_lo, 0, v7, vcc_lo
+; CHECK-NEXT:    v_lshrrev_b64 v[4:5], 16, v[4:5]
+; CHECK-NEXT:    v_or_b32_e32 v1, v11, v1
 ; CHECK-NEXT:    ; implicit-def: $vgpr42
 ; CHECK-NEXT:    ; implicit-def: $vgpr43
 ; CHECK-NEXT:    ; implicit-def: $vgpr44
+; CHECK-NEXT:    global_store_dword v[6:7], v8, off offset:4
+; CHECK-NEXT:    global_store_dwordx4 v[6:7], v[0:3], off offset:8
+; CHECK-NEXT:    global_store_dwordx2 v[6:7], v[4:5], off offset:24
 ; CHECK-NEXT:  .LBB0_31: ; %Flow
 ; CHECK-NEXT:    ; in Loop: Header=BB0_28 Depth=1
 ; CHECK-NEXT:    s_andn2_saveexec_b32 s4, s4
@@ -484,7 +480,7 @@ define protected amdgpu_kernel void @kernel_round1(ptr addrspace(1) nocapture no
 ; CHECK-NEXT:    s_mov_b32 s12, s41
 ; CHECK-NEXT:    s_mov_b32 s13, s40
 ; CHECK-NEXT:    s_mov_b32 s14, s33
-; CHECK-NEXT:    s_swappc_b64 s[30:31], s[44:45]
+; CHECK-NEXT:    s_swappc_b64 s[30:31], s[48:49]
 ; CHECK-NEXT:    s_branch .LBB0_27
 ; CHECK-NEXT:  .LBB0_33:
 ; CHECK-NEXT:    s_endpgm


