[llvm] a75d617 - [GlobalISel] Add combine for (x | mask) -> x when (x | mask) == x

Mirko Brkusanin via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 10 02:37:16 PST 2020


Author: Mirko Brkusanin
Date: 2020-11-10T11:32:13+01:00
New Revision: a75d6178b809cf11d54af91e8e1254064d58f6eb

URL: https://github.com/llvm/llvm-project/commit/a75d6178b809cf11d54af91e8e1254064d58f6eb
DIFF: https://github.com/llvm/llvm-project/commit/a75d6178b809cf11d54af91e8e1254064d58f6eb.diff

LOG: [GlobalISel] Add combine for (x | mask) -> x when (x | mask) == x

If we have a mask and a value x where (x | mask) == x, we can drop the OR
and just use x.
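
As an illustration only (not part of the patch), a minimal standalone sketch
of that check, using llvm::KnownBits the same way the new matchRedundantOr
below does; the helper name here is made up:

  #include "llvm/Support/KnownBits.h"

  using namespace llvm;

  // (X | Mask) == X iff every bit is either known one in X or known zero in
  // Mask; in that case the OR cannot change any bit of X.
  static bool orHasNoEffect(const KnownBits &XBits, const KnownBits &MaskBits) {
    return (XBits.One | MaskBits.Zero).isAllOnesValue();
  }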

Differential Revision: https://reviews.llvm.org/D90952

Added: 
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-or-redundant.mir

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
    llvm/include/llvm/Target/GlobalISel/Combine.td
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
    llvm/lib/Target/AArch64/AArch64Combine.td
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-of-shifted-logic.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
    llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 4deefc497d6d..432587ea46c4 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -431,6 +431,15 @@ class CombinerHelper {
   /// success.
   bool matchRedundantAnd(MachineInstr &MI, Register &Replacement);
 
+  /// \return true if \p MI is a G_OR instruction whose operands are x and y
+  /// where x | y == x or x | y == y. (E.g., one of the operands is an
+  /// all-zeros value.)
+  ///
+  /// \param [in] MI - The G_OR instruction.
+  /// \param [out] Replacement - A register the G_OR should be replaced with on
+  /// success.
+  bool matchRedundantOr(MachineInstr &MI, Register &Replacement);
+
   /// \return true if \p MI is a G_SEXT_INREG that can be erased.
   bool matchRedundantSExtInReg(MachineInstr &MI);
 

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index fc1718042290..32aec75af1fa 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -390,6 +390,15 @@ def redundant_and: GICombineRule <
   (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
 >;
 
+// Fold (x | y) -> x or (x | y) -> y when (x | y) is known to equal x or equal y.
+def redundant_or_matchinfo : GIDefMatchData<"Register">;
+def redundant_or: GICombineRule <
+  (defs root:$root, redundant_or_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_OR):$root,
+         [{ return Helper.matchRedundantOr(*${root}, ${matchinfo}); }]),
+  (apply [{ return Helper.replaceSingleDefInstWithReg(*${root}, ${matchinfo}); }])
+>;
+
 // If the input is already sign extended, just drop the extension.
 // sext_inreg x, K ->
 //   if computeNumSignBits(x) >= (x.getScalarSizeInBits() - K + 1)
@@ -551,7 +560,7 @@ def identity_combines : GICombineGroup<[select_same_val, right_identity_zero,
 def const_combines : GICombineGroup<[constant_fp_op, const_ptradd_to_i2p]>;
 
 def known_bits_simplifications : GICombineGroup<[
-  redundant_and, redundant_sext_inreg]>;
+  redundant_and, redundant_sext_inreg, redundant_or]>;
 
 def width_reduction_combines : GICombineGroup<[reduce_shl_of_extend]>;
 

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index cee43e5ca181..32bdba442b56 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -2888,6 +2888,52 @@ bool CombinerHelper::matchRedundantAnd(MachineInstr &MI,
   return false;
 }
 
+bool CombinerHelper::matchRedundantOr(MachineInstr &MI, Register &Replacement) {
+  // Given
+  //
+  // %y:_(sN) = G_SOMETHING
+  // %x:_(sN) = G_SOMETHING
+  // %res:_(sN) = G_OR %x, %y
+  //
+  // Eliminate the G_OR when it is known that x | y == x or x | y == y.
+  assert(MI.getOpcode() == TargetOpcode::G_OR);
+  if (!KB)
+    return false;
+
+  Register OrDst = MI.getOperand(0).getReg();
+  LLT DstTy = MRI.getType(OrDst);
+
+  // FIXME: This should be removed once GISelKnownBits supports vectors.
+  if (DstTy.isVector())
+    return false;
+
+  Register LHS = MI.getOperand(1).getReg();
+  Register RHS = MI.getOperand(2).getReg();
+  KnownBits LHSBits = KB->getKnownBits(LHS);
+  KnownBits RHSBits = KB->getKnownBits(RHS);
+
+  // Check that x | Mask == x.
+  // x | 0 == x, always
+  // x | 1 == x, only if x is also 1
+  // Meaning Mask has no effect if every bit is either zero in Mask or one in x.
+  //
+  // Check if we can replace OrDst with the LHS of the G_OR
+  if (canReplaceReg(OrDst, LHS, MRI) &&
+      (LHSBits.One | RHSBits.Zero).isAllOnesValue()) {
+    Replacement = LHS;
+    return true;
+  }
+
+  // Check if we can replace OrDst with the RHS of the G_OR
+  if (canReplaceReg(OrDst, RHS, MRI) &&
+      (LHSBits.Zero | RHSBits.One).isAllOnesValue()) {
+    Replacement = RHS;
+    return true;
+  }
+
+  return false;
+}
+
 bool CombinerHelper::matchRedundantSExtInReg(MachineInstr &MI) {
   // If the input is already sign extended, just drop the extension.
   Register Src = MI.getOperand(1).getReg();
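
As a worked instance (not part of the patch), here is how the check above
plays out on the values from the new test_or_or MIR test added below; the
small driver is hypothetical and only exercises llvm::KnownBits directly:

  #include "llvm/ADT/APInt.h"
  #include "llvm/Support/KnownBits.h"
  #include <cassert>

  using namespace llvm;

  int main() {
    // %3:_(s32) = G_OR %0, 255 -> the low 8 bits of %3 are known one, the
    // upper 24 bits are unknown.
    KnownBits LHSBits(32);
    LHSBits.One = APInt(32, 0xff);

    // The constant 15: bits 0-3 are known one, bits 4-31 are known zero.
    KnownBits RHSBits(32);
    RHSBits.One = APInt(32, 15);
    RHSBits.Zero = ~RHSBits.One;

    // Every bit is either known one in the LHS or known zero in the RHS, so
    // (%3 | 15) == %3 and the second G_OR folds to %3 (see test_or_or).
    assert((LHSBits.One | RHSBits.Zero).isAllOnesValue());
    return 0;
  }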

diff --git a/llvm/lib/Target/AArch64/AArch64Combine.td b/llvm/lib/Target/AArch64/AArch64Combine.td
index bad2c9919b9e..8cbf5931390e 100644
--- a/llvm/lib/Target/AArch64/AArch64Combine.td
+++ b/llvm/lib/Target/AArch64/AArch64Combine.td
@@ -128,6 +128,6 @@ def AArch64PostLegalizerCombinerHelper
                         sext_trunc_sextload,
                         hoist_logic_op_with_same_opcode_hands,
                         redundant_and, xor_of_and_with_same_reg,
-                        extractvecelt_pairwise_add]> {
+                        extractvecelt_pairwise_add, redundant_or]> {
   let DisableRuleOption = "aarch64postlegalizercombiner-disable-rule";
 }

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-or-redundant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-or-redundant.mir
new file mode 100644
index 000000000000..4991751a73bf
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-or-redundant.mir
@@ -0,0 +1,156 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -march=amdgcn -run-pass=amdgpu-prelegalizer-combiner -verify-machineinstrs %s -o - | FileCheck %s
+
+---
+name:            test_const_const_1
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; CHECK-LABEL: name: test_const_const_1
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK: $sgpr0 = COPY [[C]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $sgpr0
+    %0:_(s32) = G_CONSTANT i32 255
+    %1:_(s32) = G_CONSTANT i32 15
+    %2:_(s32) = G_OR %0(s32), %1(s32)
+    $sgpr0 = COPY %2(s32)
+    SI_RETURN_TO_EPILOG implicit $sgpr0
+...
+
+---
+name:            test_const_const_2
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; CHECK-LABEL: name: test_const_const_2
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK: $vgpr0 = COPY [[C]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+    %0:_(s32) = G_CONSTANT i32 15
+    %1:_(s32) = G_CONSTANT i32 255
+    %2:_(s32) = G_OR %0(s32), %1(s32)
+    $vgpr0 = COPY %2(s32)
+    SI_RETURN_TO_EPILOG implicit $vgpr0
+...
+
+---
+name:            test_const_const_3
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    ; CHECK-LABEL: name: test_const_const_3
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 1431655765
+    ; CHECK: $vgpr0 = COPY [[C]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+    %0:_(s32) = G_CONSTANT i32 1431655765
+    %1:_(s32) = G_CONSTANT i32 1145324612
+    %2:_(s32) = G_OR %1(s32), %0(s32)
+    $vgpr0 = COPY %2(s32)
+    SI_RETURN_TO_EPILOG implicit $vgpr0
+...
+
+---
+name:            test_or_or
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_or_or
+    ; CHECK: liveins: $vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+    ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[C]]
+    ; CHECK: $vgpr0 = COPY [[OR]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_CONSTANT i32 255
+    %2:_(s32) = G_CONSTANT i32 15
+    %3:_(s32) = G_OR %0, %1(s32)
+    %4:_(s32) = G_OR %3, %2
+    $vgpr0 = COPY %4(s32)
+    SI_RETURN_TO_EPILOG implicit $vgpr0
+...
+
+---
+name:            test_shl_xor_or
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $sgpr0
+
+    ; CHECK-LABEL: name: test_shl_xor_or
+    ; CHECK: liveins: $sgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SHL]], [[C1]]
+    ; CHECK: $sgpr0 = COPY [[XOR]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $sgpr0
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = G_CONSTANT i32 5
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s32) = G_CONSTANT i32 31
+    %4:_(s32) = G_SHL %0, %1(s32)
+    %5:_(s32) = G_XOR %4(s32), %2(s32)
+    %6:_(s32) = G_OR %5(s32), %3(s32)
+    $sgpr0 = COPY %6(s32)
+    SI_RETURN_TO_EPILOG implicit $sgpr0
+...
+
+---
+name:            test_lshr_xor_or
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $vgpr0
+
+    ; CHECK-LABEL: name: test_lshr_xor_or
+    ; CHECK: liveins: $vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 5
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[LSHR]], [[C1]]
+    ; CHECK: $vgpr0 = COPY [[XOR]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $vgpr0
+    %0:_(s32) = COPY $vgpr0
+    %1:_(s32) = G_CONSTANT i32 5
+    %2:_(s32) = G_CONSTANT i32 -1
+    %3:_(s32) = G_CONSTANT i32 4160749568
+    %4:_(s32) = G_LSHR %0, %1(s32)
+    %5:_(s32) = G_XOR %4(s32), %2(s32)
+    %6:_(s32) = G_OR %5(s32), %3(s32)
+    $vgpr0 = COPY %6(s32)
+    SI_RETURN_TO_EPILOG implicit $vgpr0
+...
+
+---
+name:            test_or_non_const
+tracksRegLiveness: true
+body:             |
+  bb.0:
+    liveins: $sgpr0, $sgpr1
+
+    ; CHECK-LABEL: name: test_or_non_const
+    ; CHECK: liveins: $sgpr0, $sgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
+    ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+    ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+    ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+    ; CHECK: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[LSHR]], [[C1]]
+    ; CHECK: $sgpr0 = COPY [[XOR]](s32)
+    ; CHECK: SI_RETURN_TO_EPILOG implicit $sgpr0
+    %0:_(s32) = COPY $sgpr0
+    %1:_(s32) = COPY $sgpr1
+    %2:_(s32) = G_CONSTANT i32 16
+    %3:_(s32) = G_CONSTANT i32 -1
+    %4:_(s32) = G_CONSTANT i32 4294901760
+    %5:_(s32) = G_LSHR %0, %2(s32)
+    %6:_(s32) = G_XOR %5, %3(s32)
+    %7:_(s32) = G_AND %1, %4(s32)
+    %8:_(s32) = G_OR %6, %7
+    $sgpr0 = COPY %8(s32)
+    SI_RETURN_TO_EPILOG implicit $sgpr0
+...

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-of-shifted-logic.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-of-shifted-logic.ll
index c8a1e08cb853..8d6edafc3036 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-of-shifted-logic.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shift-of-shifted-logic.ll
@@ -168,8 +168,7 @@ define amdgpu_cs i32 @test_lshr_or_1(i32 inreg %arg1) {
 define amdgpu_cs i32 @test_lshr_or_2(i32 inreg %arg1) {
 ; CHECK-LABEL: test_lshr_or_2:
 ; CHECK:       ; %bb.0: ; %.entry
-; CHECK-NEXT:    s_lshr_b32 s0, s0, 8
-; CHECK-NEXT:    s_or_b32 s0, s0, 0xffffff
+; CHECK-NEXT:    s_mov_b32 s0, 0xffffff
 ; CHECK-NEXT:    ; return to shader part epilog
 .entry:
   %z1 = lshr i32 %arg1, 3

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
index 1d56af554190..fe15a5f56ad0 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/sdiv.i64.ll
@@ -3390,20 +3390,17 @@ define i64 @v_sdiv_i64_24bit(i64 %num, i64 %den) {
 ; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CGP-NEXT:    s_mov_b32 s4, 0xffffff
 ; CGP-NEXT:    v_and_b32_e32 v1, s4, v2
-; CGP-NEXT:    v_cvt_f32_i32_e32 v2, v1
+; CGP-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; CGP-NEXT:    v_and_b32_e32 v0, s4, v0
-; CGP-NEXT:    v_cvt_f32_i32_e32 v3, v0
-; CGP-NEXT:    v_xor_b32_e32 v0, v0, v1
-; CGP-NEXT:    v_rcp_f32_e32 v4, v2
-; CGP-NEXT:    v_ashrrev_i32_e32 v0, 30, v0
-; CGP-NEXT:    v_or_b32_e32 v0, 1, v0
-; CGP-NEXT:    v_mul_f32_e32 v1, v3, v4
-; CGP-NEXT:    v_trunc_f32_e32 v1, v1
-; CGP-NEXT:    v_mad_f32 v3, -v1, v2, v3
-; CGP-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; CGP-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
-; CGP-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
+; CGP-NEXT:    v_cvt_f32_i32_e32 v0, v0
+; CGP-NEXT:    v_rcp_f32_e32 v2, v1
+; CGP-NEXT:    v_mul_f32_e32 v2, v0, v2
+; CGP-NEXT:    v_trunc_f32_e32 v2, v2
+; CGP-NEXT:    v_mad_f32 v0, -v2, v1, v0
+; CGP-NEXT:    v_cvt_i32_f32_e32 v2, v2
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v0|, |v1|
+; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v2, v0
 ; CGP-NEXT:    v_bfe_i32 v0, v0, 0, 25
 ; CGP-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; CGP-NEXT:    s_setpc_b64 s[30:31]
@@ -3695,36 +3692,30 @@ define <2 x i64> @v_sdiv_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
 ; CGP-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
 ; CGP-NEXT:    s_mov_b32 s4, 0xffffff
 ; CGP-NEXT:    v_and_b32_e32 v1, s4, v4
-; CGP-NEXT:    v_cvt_f32_i32_e32 v3, v1
+; CGP-NEXT:    v_cvt_f32_i32_e32 v1, v1
 ; CGP-NEXT:    v_and_b32_e32 v0, s4, v0
+; CGP-NEXT:    v_cvt_f32_i32_e32 v0, v0
 ; CGP-NEXT:    v_and_b32_e32 v4, s4, v6
-; CGP-NEXT:    v_cvt_f32_i32_e32 v5, v0
-; CGP-NEXT:    v_rcp_f32_e32 v6, v3
-; CGP-NEXT:    v_xor_b32_e32 v0, v0, v1
+; CGP-NEXT:    v_rcp_f32_e32 v3, v1
+; CGP-NEXT:    v_cvt_f32_i32_e32 v4, v4
 ; CGP-NEXT:    v_and_b32_e32 v2, s4, v2
-; CGP-NEXT:    v_ashrrev_i32_e32 v0, 30, v0
-; CGP-NEXT:    v_mul_f32_e32 v1, v5, v6
-; CGP-NEXT:    v_trunc_f32_e32 v1, v1
-; CGP-NEXT:    v_mad_f32 v5, -v1, v3, v5
-; CGP-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, |v3|
-; CGP-NEXT:    v_cvt_f32_i32_e32 v3, v4
-; CGP-NEXT:    v_cvt_f32_i32_e32 v5, v2
-; CGP-NEXT:    v_cvt_i32_f32_e32 v1, v1
-; CGP-NEXT:    v_xor_b32_e32 v2, v2, v4
-; CGP-NEXT:    v_rcp_f32_e32 v6, v3
-; CGP-NEXT:    v_or_b32_e32 v0, 1, v0
-; CGP-NEXT:    v_cndmask_b32_e32 v0, 0, v0, vcc
-; CGP-NEXT:    v_add_i32_e32 v0, vcc, v1, v0
-; CGP-NEXT:    v_mul_f32_e32 v4, v5, v6
-; CGP-NEXT:    v_trunc_f32_e32 v4, v4
-; CGP-NEXT:    v_mad_f32 v5, -v4, v3, v5
-; CGP-NEXT:    v_cvt_i32_f32_e32 v4, v4
-; CGP-NEXT:    v_ashrrev_i32_e32 v2, 30, v2
-; CGP-NEXT:    v_or_b32_e32 v2, 1, v2
-; CGP-NEXT:    v_cmp_ge_f32_e64 vcc, |v5|, |v3|
-; CGP-NEXT:    v_cndmask_b32_e32 v2, 0, v2, vcc
-; CGP-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
+; CGP-NEXT:    v_cvt_f32_i32_e32 v2, v2
+; CGP-NEXT:    v_mul_f32_e32 v3, v0, v3
+; CGP-NEXT:    v_trunc_f32_e32 v3, v3
+; CGP-NEXT:    v_mad_f32 v0, -v3, v1, v0
+; CGP-NEXT:    v_cvt_i32_f32_e32 v3, v3
+; CGP-NEXT:    v_rcp_f32_e32 v5, v4
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v0|, |v1|
+; CGP-NEXT:    v_cndmask_b32_e64 v0, 0, 1, s[4:5]
+; CGP-NEXT:    v_add_i32_e32 v0, vcc, v3, v0
+; CGP-NEXT:    v_mul_f32_e32 v3, v2, v5
+; CGP-NEXT:    v_trunc_f32_e32 v3, v3
+; CGP-NEXT:    v_mad_f32 v2, -v3, v4, v2
+; CGP-NEXT:    v_cvt_i32_f32_e32 v3, v3
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v2|, |v4|
+; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[4:5]
 ; CGP-NEXT:    v_bfe_i32 v0, v0, 0, 25
+; CGP-NEXT:    v_add_i32_e32 v2, vcc, v3, v2
 ; CGP-NEXT:    v_bfe_i32 v2, v2, 0, 25
 ; CGP-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; CGP-NEXT:    v_ashrrev_i32_e32 v3, 31, v2

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
index 9de42fa16e49..152c5bc39274 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/srem.i64.ll
@@ -3339,16 +3339,13 @@ define i64 @v_srem_i64_24bit(i64 %num, i64 %den) {
 ; CGP-NEXT:    v_cvt_f32_i32_e32 v2, v1
 ; CGP-NEXT:    v_and_b32_e32 v0, s4, v0
 ; CGP-NEXT:    v_cvt_f32_i32_e32 v3, v0
-; CGP-NEXT:    v_xor_b32_e32 v5, v0, v1
 ; CGP-NEXT:    v_rcp_f32_e32 v4, v2
-; CGP-NEXT:    v_ashrrev_i32_e32 v5, 30, v5
-; CGP-NEXT:    v_or_b32_e32 v5, 1, v5
 ; CGP-NEXT:    v_mul_f32_e32 v4, v3, v4
 ; CGP-NEXT:    v_trunc_f32_e32 v4, v4
 ; CGP-NEXT:    v_mad_f32 v3, -v4, v2, v3
 ; CGP-NEXT:    v_cvt_i32_f32_e32 v4, v4
-; CGP-NEXT:    v_cmp_ge_f32_e64 vcc, |v3|, |v2|
-; CGP-NEXT:    v_cndmask_b32_e32 v2, 0, v5, vcc
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v3|, |v2|
+; CGP-NEXT:    v_cndmask_b32_e64 v2, 0, 1, s[4:5]
 ; CGP-NEXT:    v_add_i32_e32 v2, vcc, v4, v2
 ; CGP-NEXT:    v_mul_lo_u32 v1, v2, v1
 ; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
@@ -3644,36 +3641,30 @@ define <2 x i64> @v_srem_v2i64_24bit(<2 x i64> %num, <2 x i64> %den) {
 ; CGP-NEXT:    v_cvt_f32_i32_e32 v3, v1
 ; CGP-NEXT:    v_and_b32_e32 v0, s4, v0
 ; CGP-NEXT:    v_cvt_f32_i32_e32 v4, v0
-; CGP-NEXT:    v_xor_b32_e32 v7, v0, v1
+; CGP-NEXT:    v_and_b32_e32 v6, s4, v6
 ; CGP-NEXT:    v_rcp_f32_e32 v5, v3
-; CGP-NEXT:    v_ashrrev_i32_e32 v7, 30, v7
-; CGP-NEXT:    v_or_b32_e32 v7, 1, v7
 ; CGP-NEXT:    v_and_b32_e32 v2, s4, v2
 ; CGP-NEXT:    v_mul_f32_e32 v5, v4, v5
 ; CGP-NEXT:    v_trunc_f32_e32 v5, v5
 ; CGP-NEXT:    v_mad_f32 v4, -v5, v3, v4
 ; CGP-NEXT:    v_cvt_i32_f32_e32 v5, v5
-; CGP-NEXT:    v_cmp_ge_f32_e64 vcc, |v4|, |v3|
-; CGP-NEXT:    v_cndmask_b32_e32 v3, 0, v7, vcc
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v4|, |v3|
+; CGP-NEXT:    v_cvt_f32_i32_e32 v4, v6
+; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
 ; CGP-NEXT:    v_add_i32_e32 v3, vcc, v5, v3
 ; CGP-NEXT:    v_mul_lo_u32 v1, v3, v1
-; CGP-NEXT:    v_and_b32_e32 v3, s4, v6
-; CGP-NEXT:    v_cvt_f32_i32_e32 v4, v3
-; CGP-NEXT:    v_xor_b32_e32 v6, v2, v3
-; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
-; CGP-NEXT:    v_cvt_f32_i32_e32 v1, v2
+; CGP-NEXT:    v_cvt_f32_i32_e32 v3, v2
 ; CGP-NEXT:    v_rcp_f32_e32 v5, v4
-; CGP-NEXT:    v_ashrrev_i32_e32 v6, 30, v6
-; CGP-NEXT:    v_or_b32_e32 v6, 1, v6
+; CGP-NEXT:    v_sub_i32_e32 v0, vcc, v0, v1
 ; CGP-NEXT:    v_bfe_i32 v0, v0, 0, 25
-; CGP-NEXT:    v_mul_f32_e32 v5, v1, v5
-; CGP-NEXT:    v_trunc_f32_e32 v5, v5
-; CGP-NEXT:    v_mad_f32 v1, -v5, v4, v1
-; CGP-NEXT:    v_cvt_i32_f32_e32 v5, v5
-; CGP-NEXT:    v_cmp_ge_f32_e64 vcc, |v1|, |v4|
-; CGP-NEXT:    v_cndmask_b32_e32 v1, 0, v6, vcc
-; CGP-NEXT:    v_add_i32_e32 v1, vcc, v5, v1
-; CGP-NEXT:    v_mul_lo_u32 v3, v1, v3
+; CGP-NEXT:    v_mul_f32_e32 v1, v3, v5
+; CGP-NEXT:    v_trunc_f32_e32 v1, v1
+; CGP-NEXT:    v_mad_f32 v3, -v1, v4, v3
+; CGP-NEXT:    v_cvt_i32_f32_e32 v1, v1
+; CGP-NEXT:    v_cmp_ge_f32_e64 s[4:5], |v3|, |v4|
+; CGP-NEXT:    v_cndmask_b32_e64 v3, 0, 1, s[4:5]
+; CGP-NEXT:    v_add_i32_e32 v1, vcc, v1, v3
+; CGP-NEXT:    v_mul_lo_u32 v3, v1, v6
 ; CGP-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
 ; CGP-NEXT:    v_sub_i32_e32 v2, vcc, v2, v3
 ; CGP-NEXT:    v_bfe_i32 v2, v2, 0, 25


        

