[llvm] f7076cf - [DAGCombiner][RISCV][AMDGPU] Call SimplifyDemandedBits at the end of visitMULHU to enable known bits constant folding.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Thu Aug 5 08:37:53 PDT 2021


Author: Craig Topper
Date: 2021-08-05T08:31:26-07:00
New Revision: f7076cfd3ad39ac71314c2deb5c5517c7c6bfd61

URL: https://github.com/llvm/llvm-project/commit/f7076cfd3ad39ac71314c2deb5c5517c7c6bfd61
DIFF: https://github.com/llvm/llvm-project/commit/f7076cfd3ad39ac71314c2deb5c5517c7c6bfd61.diff

LOG: [DAGCombiner][RISCV][AMDGPU] Call SimplifyDemandedBits at the end of visitMULHU to enable known bits constant folding.

We don't have real demanded-bits support for MULHU, but we can
still use the known-bits-based constant folding at the end of
SimplifyDemandedBits to simplify a MULHU. This helps with cases
where the LHS and RHS are known to have enough leading zeros that
the high half of the multiply is always 0.

Reviewed By: RKSimon

Differential Revision: https://reviews.llvm.org/D106471
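
As a rough illustration of the kind of case this enables (a hypothetical
example, not one of the tests touched by this patch): on a 32-bit target
with a hardware multiplier, the i64 multiply below is typically split by
type legalization into a MUL/MULHU pair. Both operands are masked to 16
bits, so known bits prove the product fits in 32 bits and the MULHU half
is always zero, which the new SimplifyDemandedBits call can constant fold
away.

    define i64 @mulhu_known_zero(i64 %a, i64 %b) {
      ; Each mask leaves at least 48 known leading zero bits, so the
      ; product fits in the low 32 bits and the legalized MULHU is 0.
      %la = and i64 %a, 65535
      %lb = and i64 %b, 65535
      %m = mul i64 %la, %lb
      ret i64 %m
    }

Something similar happens in the RISC-V vscale tests below, where the
operands of the expanded 64-bit multiply are known to be small and the
mulhu disappears.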

Added: 
    

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AMDGPU/sdiv64.ll
    llvm/test/CodeGen/AMDGPU/srem64.ll
    llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
    llvm/test/CodeGen/RISCV/rvv/stepvector.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b278be2ec8e88..065ae7eda7f94 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4577,6 +4577,12 @@ SDValue DAGCombiner::visitMULHU(SDNode *N) {
     }
   }
 
+  // Simplify the operands using demanded-bits information.
+  // We don't have demanded bits support for MULHU so this just enables constant
+  // folding based on known bits.
+  if (SimplifyDemandedBits(SDValue(N, 0)))
+    return SDValue(N, 0);
+
   return SDValue();
 }
 

diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index dd1206bc05034..eff979fad141f 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -1126,12 +1126,11 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v0, s8
 ; GCN-NEXT:    v_cvt_f32_u32_e32 v1, s9
 ; GCN-NEXT:    s_sub_u32 s3, 0, s8
-; GCN-NEXT:    s_subb_u32 s10, 0, s9
+; GCN-NEXT:    s_subb_u32 s6, 0, s9
 ; GCN-NEXT:    s_mov_b32 s7, 0xf000
 ; GCN-NEXT:    v_mac_f32_e32 v0, 0x4f800000, v1
 ; GCN-NEXT:    v_rcp_f32_e32 v0, v0
 ; GCN-NEXT:    v_mov_b32_e32 v1, 0
-; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_mul_f32_e32 v0, 0x5f7ffffc, v0
 ; GCN-NEXT:    v_mul_f32_e32 v3, 0x2f800000, v0
 ; GCN-NEXT:    v_trunc_f32_e32 v3, v3
@@ -1140,7 +1139,7 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cvt_u32_f32_e32 v3, v3
 ; GCN-NEXT:    v_mul_hi_u32 v5, s3, v0
 ; GCN-NEXT:    v_mul_lo_u32 v4, s3, v3
-; GCN-NEXT:    v_mul_lo_u32 v7, s10, v0
+; GCN-NEXT:    v_mul_lo_u32 v7, s6, v0
 ; GCN-NEXT:    v_mul_lo_u32 v6, s3, v0
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v4, v7
@@ -1150,19 +1149,20 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_lo_u32 v8, v3, v6
 ; GCN-NEXT:    v_mul_hi_u32 v6, v3, v6
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
+; GCN-NEXT:    v_mul_hi_u32 v10, v3, v4
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
-; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v10, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v4
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
 ; GCN-NEXT:    v_addc_u32_e64 v4, vcc, v3, v5, s[0:1]
 ; GCN-NEXT:    v_mul_lo_u32 v6, s3, v4
 ; GCN-NEXT:    v_mul_hi_u32 v7, s3, v0
-; GCN-NEXT:    v_mul_lo_u32 v8, s10, v0
+; GCN-NEXT:    v_mul_lo_u32 v8, s6, v0
+; GCN-NEXT:    s_mov_b32 s6, -1
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, v7, v6
 ; GCN-NEXT:    v_mul_lo_u32 v7, s3, v0
 ; GCN-NEXT:    v_add_i32_e32 v6, vcc, v8, v6
@@ -1177,29 +1177,24 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_lo_u32 v4, v4, v6
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, v10, v7
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v11, v9, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v8, v1, vcc
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v8, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v7, v4
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v2, v6, vcc
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v3, v6, s[0:1]
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v3, v1, s[0:1]
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v3, 24
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GCN-NEXT:    v_mul_lo_u32 v3, v1, 24
 ; GCN-NEXT:    v_mul_hi_u32 v0, v0, 24
-; GCN-NEXT:    v_mul_hi_u32 v3, v3, 24
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, 24
 ; GCN-NEXT:    v_mov_b32_e32 v5, s9
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v2, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, 0, v0
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
-; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v2, s8, v1
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v2, v1, vcc
+; GCN-NEXT:    v_mul_lo_u32 v1, s9, v0
 ; GCN-NEXT:    v_mul_hi_u32 v3, s8, v0
-; GCN-NEXT:    v_mul_lo_u32 v4, s9, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
+; GCN-NEXT:    v_add_i32_e32 v1, vcc, v3, v1
 ; GCN-NEXT:    v_mul_lo_u32 v3, s8, v0
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v2
+; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v1
 ; GCN-NEXT:    v_sub_i32_e32 v3, vcc, 24, v3
 ; GCN-NEXT:    v_subb_u32_e64 v4, s[0:1], v4, v5, vcc
 ; GCN-NEXT:    v_subrev_i32_e64 v5, s[0:1], s8, v3
@@ -1211,22 +1206,22 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_cmp_eq_u32_e64 s[0:1], s9, v4
 ; GCN-NEXT:    v_cndmask_b32_e64 v4, v6, v5, s[0:1]
 ; GCN-NEXT:    v_add_i32_e64 v5, s[0:1], 2, v0
-; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v1, s[0:1]
+; GCN-NEXT:    v_addc_u32_e64 v6, s[0:1], 0, v2, s[0:1]
+; GCN-NEXT:    v_subb_u32_e32 v1, vcc, 0, v1, vcc
 ; GCN-NEXT:    v_add_i32_e64 v7, s[0:1], 1, v0
-; GCN-NEXT:    v_subb_u32_e32 v2, vcc, 0, v2, vcc
-; GCN-NEXT:    v_addc_u32_e64 v8, s[0:1], 0, v1, s[0:1]
+; GCN-NEXT:    v_addc_u32_e64 v2, s[0:1], 0, v2, s[0:1]
+; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v1
 ; GCN-NEXT:    v_cmp_ne_u32_e64 s[0:1], 0, v4
-; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s9, v2
-; GCN-NEXT:    v_cndmask_b32_e64 v4, v8, v6, s[0:1]
-; GCN-NEXT:    v_cndmask_b32_e64 v6, 0, -1, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v4, 0, -1, vcc
 ; GCN-NEXT:    v_cmp_le_u32_e32 vcc, s8, v3
 ; GCN-NEXT:    v_cndmask_b32_e64 v3, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v2
-; GCN-NEXT:    v_cndmask_b32_e32 v2, v6, v3, vcc
-; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v2
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, s9, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v1, v4, v3, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v2, v2, v6, s[0:1]
+; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v1, 0, v2, vcc
 ; GCN-NEXT:    v_cndmask_b32_e64 v2, v7, v5, s[0:1]
 ; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
-; GCN-NEXT:    v_cndmask_b32_e32 v1, v1, v4, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v0, s2, v0
 ; GCN-NEXT:    v_xor_b32_e32 v1, s2, v1
 ; GCN-NEXT:    v_mov_b32_e32 v2, s2
@@ -1397,43 +1392,38 @@ define i64 @v_test_sdiv_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v3, v3, 24
 ; GCN-NEXT:    v_mul_hi_u32 v4, v4, 24
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v13, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, 0, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v12, vcc
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v6, v0, v3
-; GCN-NEXT:    v_mul_lo_u32 v7, v1, v3
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v3
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; GCN-NEXT:    v_sub_i32_e32 v7, vcc, 0, v5
-; GCN-NEXT:    v_sub_i32_e32 v6, vcc, 24, v6
-; GCN-NEXT:    v_subb_u32_e64 v7, s[4:5], v7, v1, vcc
-; GCN-NEXT:    v_sub_i32_e64 v8, s[4:5], v6, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v9, 0, -1, s[4:5]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v8, v0
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v13, v4, vcc
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, v3
+; GCN-NEXT:    v_mul_hi_u32 v5, v0, v3
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, v3
+; GCN-NEXT:    v_sub_i32_e32 v6, vcc, 0, v4
+; GCN-NEXT:    v_sub_i32_e32 v5, vcc, 24, v5
+; GCN-NEXT:    v_subb_u32_e64 v6, s[4:5], v6, v1, vcc
+; GCN-NEXT:    v_sub_i32_e64 v7, s[4:5], v5, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v6, s[4:5], 0, v6, s[4:5]
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[4:5]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], v7, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v7, v9, v8, s[4:5]
-; GCN-NEXT:    v_add_i32_e64 v8, s[4:5], 2, v3
-; GCN-NEXT:    v_addc_u32_e64 v9, s[4:5], 0, v4, s[4:5]
-; GCN-NEXT:    v_add_i32_e64 v10, s[4:5], 1, v3
-; GCN-NEXT:    v_subb_u32_e32 v5, vcc, 0, v5, vcc
-; GCN-NEXT:    v_addc_u32_e64 v11, s[4:5], 0, v4, s[4:5]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v7
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v7, v11, v9, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v9, 0, -1, vcc
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v6, v0
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[4:5]
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], v6, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v6, v8, v7, s[4:5]
+; GCN-NEXT:    v_add_i32_e64 v7, s[4:5], 2, v3
+; GCN-NEXT:    v_addc_u32_e64 v8, s[4:5], 0, v13, s[4:5]
+; GCN-NEXT:    v_add_i32_e64 v9, s[4:5], 1, v3
+; GCN-NEXT:    v_subb_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_addc_u32_e64 v10, s[4:5], 0, v13, s[4:5]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v6
+; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v4, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v6, v10, v8, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
+; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, v4, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v8, v0, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v10, v8, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v7, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v1, v9, v7, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v6, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v3, v0, v2
 ; GCN-NEXT:    v_xor_b32_e32 v0, v1, v2
@@ -1606,43 +1596,39 @@ define i64 @v_test_sdiv_pow2_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v4, 15, v4
 ; GCN-NEXT:    v_lshrrev_b32_e32 v3, 17, v3
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v4
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, v13, v5, vcc
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, 0, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v4, vcc
-; GCN-NEXT:    v_addc_u32_e32 v4, vcc, 0, v12, vcc
-; GCN-NEXT:    v_mul_lo_u32 v5, v0, v4
-; GCN-NEXT:    v_mul_hi_u32 v6, v0, v3
-; GCN-NEXT:    v_mul_lo_u32 v7, v1, v3
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v6, v5
-; GCN-NEXT:    v_mul_lo_u32 v6, v0, v3
-; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
-; GCN-NEXT:    v_sub_i32_e32 v7, vcc, 0, v5
-; GCN-NEXT:    v_sub_i32_e32 v6, vcc, 0x8000, v6
-; GCN-NEXT:    v_subb_u32_e64 v7, s[4:5], v7, v1, vcc
-; GCN-NEXT:    v_sub_i32_e64 v8, s[4:5], v6, v0
-; GCN-NEXT:    v_subbrev_u32_e64 v7, s[4:5], 0, v7, s[4:5]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v9, 0, -1, s[4:5]
-; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v8, v0
+; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v13, v5, vcc
+; GCN-NEXT:    v_mul_lo_u32 v4, v1, v3
+; GCN-NEXT:    v_mul_hi_u32 v5, v0, v3
+; GCN-NEXT:    s_mov_b32 s4, 0x8000
+; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
+; GCN-NEXT:    v_mul_lo_u32 v5, v0, v3
+; GCN-NEXT:    v_sub_i32_e32 v6, vcc, 0, v4
+; GCN-NEXT:    v_sub_i32_e32 v5, vcc, s4, v5
+; GCN-NEXT:    v_subb_u32_e64 v6, s[4:5], v6, v1, vcc
+; GCN-NEXT:    v_sub_i32_e64 v7, s[4:5], v5, v0
+; GCN-NEXT:    v_subbrev_u32_e64 v6, s[4:5], 0, v6, s[4:5]
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v6, v1
 ; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, s[4:5]
-; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], v7, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v7, v9, v8, s[4:5]
-; GCN-NEXT:    v_add_i32_e64 v8, s[4:5], 2, v3
-; GCN-NEXT:    v_addc_u32_e64 v9, s[4:5], 0, v4, s[4:5]
-; GCN-NEXT:    v_add_i32_e64 v10, s[4:5], 1, v3
-; GCN-NEXT:    v_subb_u32_e32 v5, vcc, 0, v5, vcc
-; GCN-NEXT:    v_addc_u32_e64 v11, s[4:5], 0, v4, s[4:5]
-; GCN-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v7
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v1
-; GCN-NEXT:    v_cndmask_b32_e64 v7, v11, v9, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e64 v9, 0, -1, vcc
-; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v6, v0
+; GCN-NEXT:    v_cmp_ge_u32_e64 s[4:5], v7, v0
+; GCN-NEXT:    v_cndmask_b32_e64 v7, 0, -1, s[4:5]
+; GCN-NEXT:    v_cmp_eq_u32_e64 s[4:5], v6, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v6, v8, v7, s[4:5]
+; GCN-NEXT:    v_add_i32_e64 v7, s[4:5], 2, v3
+; GCN-NEXT:    v_addc_u32_e64 v8, s[4:5], 0, v13, s[4:5]
+; GCN-NEXT:    v_add_i32_e64 v9, s[4:5], 1, v3
+; GCN-NEXT:    v_subb_u32_e32 v4, vcc, 0, v4, vcc
+; GCN-NEXT:    v_addc_u32_e64 v10, s[4:5], 0, v13, s[4:5]
+; GCN-NEXT:    v_cmp_ne_u32_e64 s[4:5], 0, v6
+; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v4, v1
+; GCN-NEXT:    v_cndmask_b32_e64 v6, v10, v8, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e64 v8, 0, -1, vcc
+; GCN-NEXT:    v_cmp_ge_u32_e32 vcc, v5, v0
 ; GCN-NEXT:    v_cndmask_b32_e64 v0, 0, -1, vcc
-; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, v5, v1
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v9, v0, vcc
+; GCN-NEXT:    v_cmp_eq_u32_e32 vcc, v4, v1
+; GCN-NEXT:    v_cndmask_b32_e32 v0, v8, v0, vcc
 ; GCN-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
-; GCN-NEXT:    v_cndmask_b32_e64 v1, v10, v8, s[4:5]
-; GCN-NEXT:    v_cndmask_b32_e32 v0, v4, v7, vcc
+; GCN-NEXT:    v_cndmask_b32_e64 v1, v9, v7, s[4:5]
+; GCN-NEXT:    v_cndmask_b32_e32 v0, 0, v6, vcc
 ; GCN-NEXT:    v_cndmask_b32_e32 v1, v3, v1, vcc
 ; GCN-NEXT:    v_xor_b32_e32 v3, v0, v2
 ; GCN-NEXT:    v_xor_b32_e32 v0, v1, v2

diff --git a/llvm/test/CodeGen/AMDGPU/srem64.ll b/llvm/test/CodeGen/AMDGPU/srem64.ll
index 4168a97c2bbce..95d575ec95d70 100644
--- a/llvm/test/CodeGen/AMDGPU/srem64.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem64.ll
@@ -1330,12 +1330,12 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_lo_u32 v8, v3, v6
 ; GCN-NEXT:    v_mul_hi_u32 v6, v3, v6
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v7
+; GCN-NEXT:    v_mul_hi_u32 v10, v3, v4
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v2, v9, vcc
-; GCN-NEXT:    v_mul_hi_u32 v9, v3, v4
 ; GCN-NEXT:    v_mul_lo_u32 v4, v3, v4
 ; GCN-NEXT:    v_add_i32_e32 v5, vcc, v5, v8
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v7, v6, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v9, v1, vcc
+; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v10, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v5, v4
 ; GCN-NEXT:    v_add_i32_e64 v0, s[0:1], v0, v4
 ; GCN-NEXT:    v_addc_u32_e32 v5, vcc, v2, v6, vcc
@@ -1357,29 +1357,24 @@ define amdgpu_kernel void @s_test_srem_k_num_i64(i64 addrspace(1)* %out, i64 %x)
 ; GCN-NEXT:    v_mul_lo_u32 v4, v4, v6
 ; GCN-NEXT:    v_add_i32_e32 v7, vcc, v10, v7
 ; GCN-NEXT:    v_addc_u32_e32 v7, vcc, v11, v9, vcc
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v8, v1, vcc
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v8, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v4, vcc, v7, v4
-; GCN-NEXT:    v_addc_u32_e32 v6, vcc, v2, v6, vcc
+; GCN-NEXT:    v_addc_u32_e32 v1, vcc, v2, v1, vcc
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
-; GCN-NEXT:    v_addc_u32_e64 v3, vcc, v3, v6, s[0:1]
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v3, vcc
-; GCN-NEXT:    v_mul_lo_u32 v4, v3, 24
-; GCN-NEXT:    v_mul_hi_u32 v0, v0, 24
-; GCN-NEXT:    v_mul_hi_u32 v3, v3, 24
+; GCN-NEXT:    v_addc_u32_e64 v1, vcc, v3, v1, s[0:1]
 ; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v4
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v2, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v0, vcc, 0, v0
-; GCN-NEXT:    v_addc_u32_e32 v0, vcc, 0, v2, vcc
 ; GCN-NEXT:    v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; GCN-NEXT:    v_mul_lo_u32 v1, s8, v1
+; GCN-NEXT:    v_mul_lo_u32 v3, v1, 24
+; GCN-NEXT:    v_mul_hi_u32 v0, v0, 24
+; GCN-NEXT:    v_mul_hi_u32 v1, v1, 24
+; GCN-NEXT:    v_add_i32_e32 v0, vcc, v0, v3
+; GCN-NEXT:    v_addc_u32_e32 v0, vcc, v2, v1, vcc
+; GCN-NEXT:    v_mul_lo_u32 v1, s9, v0
 ; GCN-NEXT:    v_mul_hi_u32 v2, s8, v0
-; GCN-NEXT:    v_mul_lo_u32 v3, s9, v0
 ; GCN-NEXT:    v_mul_lo_u32 v0, s8, v0
+; GCN-NEXT:    v_mov_b32_e32 v3, s9
 ; GCN-NEXT:    v_add_i32_e32 v1, vcc, v2, v1
-; GCN-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 0, v1
-; GCN-NEXT:    v_mov_b32_e32 v3, s9
 ; GCN-NEXT:    v_sub_i32_e32 v0, vcc, 24, v0
 ; GCN-NEXT:    v_subb_u32_e64 v2, s[0:1], v2, v3, vcc
 ; GCN-NEXT:    v_subrev_i32_e64 v4, s[0:1], s8, v0
@@ -1574,16 +1569,11 @@ define i64 @v_test_srem_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_mul_hi_u32 v2, v2, 24
 ; GCN-NEXT:    v_mul_hi_u32 v3, v3, 24
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v4
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v12, v3, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 0, v2
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v11, vcc
-; GCN-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v12, v3, vcc
+; GCN-NEXT:    v_mul_lo_u32 v3, v1, v2
 ; GCN-NEXT:    v_mul_hi_u32 v4, v0, v2
-; GCN-NEXT:    v_mul_lo_u32 v5, v1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v2, v0, v2
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v3
 ; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 24, v2
 ; GCN-NEXT:    v_subb_u32_e64 v4, s[4:5], v4, v1, vcc
@@ -1781,18 +1771,14 @@ define i64 @v_test_srem_pow2_k_num_i64(i64 %x) {
 ; GCN-NEXT:    v_lshlrev_b32_e32 v3, 15, v3
 ; GCN-NEXT:    v_lshrrev_b32_e32 v2, 17, v2
 ; GCN-NEXT:    v_add_i32_e32 v2, vcc, v2, v3
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, v12, v4, vcc
-; GCN-NEXT:    v_add_i32_e32 v2, vcc, 0, v2
-; GCN-NEXT:    v_addc_u32_e32 v2, vcc, 0, v3, vcc
-; GCN-NEXT:    v_addc_u32_e32 v3, vcc, 0, v11, vcc
-; GCN-NEXT:    v_mul_lo_u32 v3, v0, v3
+; GCN-NEXT:    v_addc_u32_e32 v2, vcc, v12, v4, vcc
+; GCN-NEXT:    v_mul_lo_u32 v3, v1, v2
 ; GCN-NEXT:    v_mul_hi_u32 v4, v0, v2
-; GCN-NEXT:    v_mul_lo_u32 v5, v1, v2
 ; GCN-NEXT:    v_mul_lo_u32 v2, v0, v2
+; GCN-NEXT:    s_mov_b32 s4, 0x8000
 ; GCN-NEXT:    v_add_i32_e32 v3, vcc, v4, v3
-; GCN-NEXT:    v_add_i32_e32 v3, vcc, v3, v5
 ; GCN-NEXT:    v_sub_i32_e32 v4, vcc, 0, v3
-; GCN-NEXT:    v_sub_i32_e32 v2, vcc, 0x8000, v2
+; GCN-NEXT:    v_sub_i32_e32 v2, vcc, s4, v2
 ; GCN-NEXT:    v_subb_u32_e64 v4, s[4:5], v4, v1, vcc
 ; GCN-NEXT:    v_sub_i32_e64 v5, s[4:5], v2, v0
 ; GCN-NEXT:    v_subbrev_u32_e64 v6, s[6:7], 0, v4, s[4:5]

diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
index 615822235823f..9384b4a3e523d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-vscale.i64.ll
@@ -70,12 +70,10 @@ define i64 @vscale_non_pow2() nounwind {
 ;
 ; RV32-LABEL: vscale_non_pow2:
 ; RV32:       # %bb.0: # %entry
-; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a0, a1, 1
-; RV32-NEXT:    add a0, a0, a1
-; RV32-NEXT:    srli a1, a1, 3
-; RV32-NEXT:    addi a2, zero, 24
-; RV32-NEXT:    mulhu a1, a1, a2
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a1, a0, 1
+; RV32-NEXT:    add a0, a1, a0
+; RV32-NEXT:    mv a1, zero
 ; RV32-NEXT:    ret
 entry:
   %0 = call i64 @llvm.vscale.i64()

diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index bac37614f2af7..dcf6de2c71a8c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -551,11 +551,8 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
 ; RV32:       # %bb.0: # %entry
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw zero, 12(sp)
 ; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    srli a1, a0, 3
-; RV32-NEXT:    addi a2, zero, 24
-; RV32-NEXT:    mulhu a1, a1, a2
-; RV32-NEXT:    sw a1, 12(sp)
 ; RV32-NEXT:    slli a1, a0, 1
 ; RV32-NEXT:    add a0, a1, a0
 ; RV32-NEXT:    sw a0, 8(sp)


        

