[llvm] [GlobalISel] Rewrite binop_left_to_zero using MIR Patterns (PR #177924)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 29 07:33:47 PST 2026


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->
@llvm/pr-subscribers-llvm-globalisel

@llvm/pr-subscribers-backend-risc-v

Author: Osman Yasar (osmanyasar05)

<details>
<summary>Changes</summary>

Following 2d87319f06ef936233ba6aaa612da9586c427d68, this PR rewrites the `binop_left_to_zero` rule using MIR Patterns.

The new pattern uses `GIReplaceReg` in the apply clause. According to [MIRPatterns.rst](https://github.com/llvm/llvm-project/blob/5b4a5cf51f37cb42c5e0bdb22b43acde137281c8/llvm/docs/GlobalISel/MIRPatterns.rst?plain=1#L222), `GIReplaceReg` checks `canReplaceReg`, so the new apply pattern is equivalent to the old `matchOperandIsZero` implementation.

Added tests for all the opcodes covered by this rule (`G_SHL`, `G_LSHR`, `G_ASHR`, `G_SDIV`, `G_UDIV`, `G_SREM`, `G_UREM`, `G_MUL`). Note that `G_MUL` was not in the previous `wip_match_opcode` list and is newly covered by the rewritten rule.

---
Full diff: https://github.com/llvm/llvm-project/pull/177924.diff


5 Files Affected:

- (modified) llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h (-3) 
- (modified) llvm/include/llvm/Target/GlobalISel/Combine.td (+9-5) 
- (modified) llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp (-7) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/combine.mir (+119) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll (+37-40) 


``````````diff
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 5d4347066a40c..da53005ed801e 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -518,9 +518,6 @@ class CombinerHelper {
   /// Optimize (x op x) -> x
   bool matchBinOpSameVal(MachineInstr &MI) const;
 
-  /// Check if operand \p OpIdx is zero.
-  bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) const;
-
   /// Check if operand \p OpIdx is undef.
   bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const;
 
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index a9b4932b2e317..45cde3f9ac7af 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -609,12 +609,16 @@ def binop_same_val: GICombineRule<
 >;
 
 // Fold (0 op x) - > 0
+def binop_left_to_zero_frags : GICombinePatFrag<
+  (outs root:$dst, $zero), (ins $rhs),
+  !foreach(op,
+           [G_SHL, G_LSHR, G_ASHR, G_SDIV, G_UDIV, G_SREM, G_UREM, G_MUL],
+           (pattern (G_CONSTANT $zero, 0), (op $dst, $zero, $rhs)))>;
+
 def binop_left_to_zero: GICombineRule<
-  (defs root:$root),
-  (match (wip_match_opcode G_SHL, G_LSHR, G_ASHR, G_SDIV, G_UDIV, G_SREM,
-                           G_UREM):$root,
-    [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
-  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
+  (defs root:$dst),
+  (match (binop_left_to_zero_frags $dst, $zero, $rhs)),
+  (apply (GIReplaceReg $dst, $zero))
 >;
 
 def urem_pow2_to_mask : GICombineRule<
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 85d5f06b9813d..94cbbef2c5d29 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -3026,13 +3026,6 @@ bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) const {
                        MRI);
 }
 
-bool CombinerHelper::matchOperandIsZero(MachineInstr &MI,
-                                        unsigned OpIdx) const {
-  return matchConstantOp(MI.getOperand(OpIdx), 0) &&
-         canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
-                       MRI);
-}
-
 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI,
                                          unsigned OpIdx) const {
   MachineOperand &MO = MI.getOperand(OpIdx);
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/combine.mir b/llvm/test/CodeGen/RISCV/GlobalISel/combine.mir
index cd574945965b5..0d12e83354cb5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/combine.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/combine.mir
@@ -17,3 +17,122 @@ body:             |
     $x10 = COPY %3(s64)
     PseudoRET implicit $x10
 ...
+
+---
+name:            binop_left_to_zero_shl
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_shl
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_SHL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_lshr
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_lshr
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_LSHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_ashr
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_ashr
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_ASHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_sdiv
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_sdiv
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_SDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_udiv
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_udiv
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_UDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_srem
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_srem
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_SREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_urem
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_urem
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_UREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_mul
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_mul
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_MUL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
index 5f61ee2d02d24..7c0e91fc4a291 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
@@ -97,30 +97,28 @@ define i64 @udiv64_constant_no_add(i64 %a) nounwind {
 ; RV32-NEXT:    mul a5, a1, a4
 ; RV32-NEXT:    mul a6, a0, a2
 ; RV32-NEXT:    mulhu a7, a0, a4
-; RV32-NEXT:    mul t1, a1, a2
-; RV32-NEXT:    mulhu t2, a1, a4
+; RV32-NEXT:    mul t0, a1, a2
+; RV32-NEXT:    mulhu t1, a1, a4
 ; RV32-NEXT:    mulhu a0, a0, a2
 ; RV32-NEXT:    mulhu a1, a1, a2
 ; RV32-NEXT:    add a5, a5, a6
-; RV32-NEXT:    mv t0, t1
+; RV32-NEXT:    sltu a2, t0, t0
 ; RV32-NEXT:    mv a1, a1
-; RV32-NEXT:    sltu a4, a5, a6
+; RV32-NEXT:    sltiu a4, t0, 0
+; RV32-NEXT:    add t0, t0, t1
+; RV32-NEXT:    sltu a6, a5, a6
 ; RV32-NEXT:    add a5, a5, a7
-; RV32-NEXT:    sltu a6, t1, t1
-; RV32-NEXT:    sltiu t1, t1, 0
-; RV32-NEXT:    add t0, t0, t2
-; RV32-NEXT:    mv a1, a1
-; RV32-NEXT:    sltu a2, a5, a7
-; RV32-NEXT:    add a6, a6, t1
-; RV32-NEXT:    sltu a5, t0, t2
+; RV32-NEXT:    add a2, a2, a4
+; RV32-NEXT:    sltu a4, t0, t1
 ; RV32-NEXT:    add t0, t0, a0
 ; RV32-NEXT:    mv a1, a1
-; RV32-NEXT:    add a2, a4, a2
-; RV32-NEXT:    add a5, a6, a5
+; RV32-NEXT:    sltu a3, a5, a7
+; RV32-NEXT:    add a2, a2, a4
 ; RV32-NEXT:    sltu a0, t0, a0
-; RV32-NEXT:    add a0, a5, a0
-; RV32-NEXT:    add t0, t0, a2
-; RV32-NEXT:    sltu a2, t0, a2
+; RV32-NEXT:    add a3, a6, a3
+; RV32-NEXT:    add a0, a2, a0
+; RV32-NEXT:    add t0, t0, a3
+; RV32-NEXT:    sltu a2, t0, a3
 ; RV32-NEXT:    srli a3, t0, 2
 ; RV32-NEXT:    add a0, a0, a2
 ; RV32-NEXT:    add a1, a1, a0
@@ -154,44 +152,43 @@ define i64 @udiv64_constant_add(i64 %a) nounwind {
 ; RV32-NEXT:    addi a2, a2, 1171
 ; RV32-NEXT:    addi a3, a3, -1756
 ; RV32-NEXT:    mul a5, a1, a2
-; RV32-NEXT:    mul a6, a0, a3
+; RV32-NEXT:    mul a6, a1, a3
+; RV32-NEXT:    mulhu t0, a1, a3
+; RV32-NEXT:    sltu t1, a6, a6
+; RV32-NEXT:    mv a7, t0
+; RV32-NEXT:    sltiu t0, a6, 0
+; RV32-NEXT:    add t0, t1, t0
+; RV32-NEXT:    mul t1, a0, a3
+; RV32-NEXT:    mv a4, a7
 ; RV32-NEXT:    mulhu a7, a0, a2
-; RV32-NEXT:    mulhu t2, a1, a3
-; RV32-NEXT:    mv t1, t2
-; RV32-NEXT:    mv t1, t1
-; RV32-NEXT:    mul t2, a1, a3
 ; RV32-NEXT:    mulhu a2, a1, a2
 ; RV32-NEXT:    mulhu a3, a0, a3
-; RV32-NEXT:    add a5, a5, a6
-; RV32-NEXT:    mv t0, t2
-; RV32-NEXT:    sltu a6, a5, a6
+; RV32-NEXT:    add a5, a5, t1
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    sltu t1, a5, t1
 ; RV32-NEXT:    add a5, a5, a7
-; RV32-NEXT:    sltu t2, t2, t2
+; RV32-NEXT:    sltu a2, a6, a2
+; RV32-NEXT:    add a6, a6, a3
 ; RV32-NEXT:    sltu a5, a5, a7
-; RV32-NEXT:    sltiu a7, t0, 0
-; RV32-NEXT:    add t0, t0, a2
-; RV32-NEXT:    add a7, t2, a7
-; RV32-NEXT:    sltu a2, t0, a2
-; RV32-NEXT:    add t0, t0, a3
-; RV32-NEXT:    add a5, a6, a5
-; RV32-NEXT:    add a2, a7, a2
-; RV32-NEXT:    sltu a3, t0, a3
+; RV32-NEXT:    add a2, t0, a2
+; RV32-NEXT:    sltu a3, a6, a3
+; RV32-NEXT:    add a5, t1, a5
 ; RV32-NEXT:    add a2, a2, a3
-; RV32-NEXT:    add t0, t0, a5
-; RV32-NEXT:    sltu a3, t0, a5
-; RV32-NEXT:    sub a5, a0, t0
-; RV32-NEXT:    sltu a0, a0, t0
+; RV32-NEXT:    add a6, a6, a5
+; RV32-NEXT:    sltu a3, a6, a5
+; RV32-NEXT:    sub a5, a0, a6
+; RV32-NEXT:    sltu a0, a0, a6
 ; RV32-NEXT:    add a2, a2, a3
 ; RV32-NEXT:    sub a1, a1, a0
 ; RV32-NEXT:    srli a5, a5, 1
-; RV32-NEXT:    add a2, t1, a2
+; RV32-NEXT:    add a2, a4, a2
 ; RV32-NEXT:    sub a1, a1, a2
 ; RV32-NEXT:    slli a0, a1, 31
 ; RV32-NEXT:    srli a1, a1, 1
 ; RV32-NEXT:    or a0, a5, a0
 ; RV32-NEXT:    add a1, a1, a2
-; RV32-NEXT:    add a0, a0, t0
-; RV32-NEXT:    sltu a2, a0, t0
+; RV32-NEXT:    add a0, a0, a6
+; RV32-NEXT:    sltu a2, a0, a6
 ; RV32-NEXT:    srli a0, a0, 2
 ; RV32-NEXT:    add a1, a1, a2
 ; RV32-NEXT:    slli a2, a1, 30

``````````

</details>


https://github.com/llvm/llvm-project/pull/177924


More information about the llvm-commits mailing list