[llvm] d8d4096 - [GlobalISel] Rewrite binop_left_to_zero using MIR Patterns (#177924)

via llvm-commits <llvm-commits@lists.llvm.org>
Thu Jan 29 10:28:39 PST 2026


Author: Osman Yasar
Date: 2026-01-29T18:28:34Z
New Revision: d8d4096c0be0a6a3248c8deae96608913a85debf

URL: https://github.com/llvm/llvm-project/commit/d8d4096c0be0a6a3248c8deae96608913a85debf
DIFF: https://github.com/llvm/llvm-project/commit/d8d4096c0be0a6a3248c8deae96608913a85debf.diff

LOG: [GlobalISel] Rewrite binop_left_to_zero using MIR Patterns (#177924)

Following 2d87319f06ef936233ba6aaa612da9586c427d68, this PR rewrites the
`binop_left_to_zero` rule using MIR Patterns.

The new pattern uses `GIReplaceReg` in the apply clause. According to
[MIRPatterns.rst](https://github.com/llvm/llvm-project/blob/5b4a5cf51f37cb42c5e0bdb22b43acde137281c8/llvm/docs/GlobalISel/MIRPatterns.rst?plain=1#L222),
`GIReplaceReg` checks `canReplaceReg`, so the new rule is equivalent to the
old `matchOperandIsZero` implementation: the `(G_CONSTANT $zero, 0)` match
covers the old `matchConstantOp` check, and `GIReplaceReg` covers the old
`canReplaceReg` check.
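
For illustration, this is the fold the rule performs, as a minimal MIR
sketch (the register names and the s64 type here are illustrative, not
taken from the patch):

    %zero:_(s64) = G_CONSTANT i64 0
    %rhs:_(s64) = COPY $x10
    %dst:_(s64) = G_SHL %zero, %rhs
    $x10 = COPY %dst(s64)

    ; after the combine, every use of %dst is rewritten to use %zero,
    ; the G_SHL is erased, and the now-dead COPY from $x10 is cleaned
    ; up by the combiner, leaving:
    %zero:_(s64) = G_CONSTANT i64 0
    $x10 = COPY %zero(s64)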

Added tests for all the opcodes covered by this rule: `G_SHL`, `G_LSHR`,
`G_ASHR`, `G_SDIV`, `G_UDIV`, `G_SREM`, `G_UREM`, and `G_MUL`.
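
Note that `G_MUL` appears in the new opcode list but was not in the old
`wip_match_opcode` list, so `(0 * x)` is now folded by this rule as well.
A minimal sketch of that case (same shape as the new
`binop_left_to_zero_mul` test in the diff below):

    %0:_(s64) = G_CONSTANT i64 0
    %1:_(s64) = COPY $x10
    %2:_(s64) = G_MUL %0, %1   ; 0 * x
    $x10 = COPY %2(s64)
    ; combines to: $x10 = COPY %0(s64)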

Added: 
    

Modified: 
    llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
    llvm/include/llvm/Target/GlobalISel/Combine.td
    llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
    llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shl-from-extend-narrow.postlegal.mir
    llvm/test/CodeGen/RISCV/GlobalISel/combine.mir
    llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index 5d4347066a40c..da53005ed801e 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -518,9 +518,6 @@ class CombinerHelper {
   /// Optimize (x op x) -> x
   bool matchBinOpSameVal(MachineInstr &MI) const;
 
-  /// Check if operand \p OpIdx is zero.
-  bool matchOperandIsZero(MachineInstr &MI, unsigned OpIdx) const;
-
   /// Check if operand \p OpIdx is undef.
   bool matchOperandIsUndef(MachineInstr &MI, unsigned OpIdx) const;
 

diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index 3b8f0830cf0ef..1e15753848a45 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -609,12 +609,16 @@ def binop_same_val: GICombineRule<
 >;
 
 // Fold (0 op x) - > 0
+def binop_left_to_zero_frags : GICombinePatFrag<
+  (outs root:$dst, $zero), (ins $rhs),
+  !foreach(op,
+           [G_SHL, G_LSHR, G_ASHR, G_SDIV, G_UDIV, G_SREM, G_UREM, G_MUL],
+           (pattern (G_CONSTANT $zero, 0), (op $dst, $zero, $rhs)))>;
+
 def binop_left_to_zero: GICombineRule<
-  (defs root:$root),
-  (match (wip_match_opcode G_SHL, G_LSHR, G_ASHR, G_SDIV, G_UDIV, G_SREM,
-                           G_UREM):$root,
-    [{ return Helper.matchOperandIsZero(*${root}, 1); }]),
-  (apply [{ Helper.replaceSingleDefInstWithOperand(*${root}, 1); }])
+  (defs root:$dst),
+  (match (binop_left_to_zero_frags $dst, $zero, $rhs)),
+  (apply (GIReplaceReg $dst, $zero))
 >;
 
 def urem_pow2_to_mask : GICombineRule<

diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index 08696f08284b1..b9273d388ea70 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -3046,13 +3046,6 @@ bool CombinerHelper::matchBinOpSameVal(MachineInstr &MI) const {
                        MRI);
 }
 
-bool CombinerHelper::matchOperandIsZero(MachineInstr &MI,
-                                        unsigned OpIdx) const {
-  return matchConstantOp(MI.getOperand(OpIdx), 0) &&
-         canReplaceReg(MI.getOperand(0).getReg(), MI.getOperand(OpIdx).getReg(),
-                       MRI);
-}
-
 bool CombinerHelper::matchOperandIsUndef(MachineInstr &MI,
                                          unsigned OpIdx) const {
   MachineOperand &MO = MI.getOperand(OpIdx);

diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shl-from-extend-narrow.postlegal.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shl-from-extend-narrow.postlegal.mir
index a8cd974b01ab4..fd5e01acdc176 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shl-from-extend-narrow.postlegal.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/combine-shl-from-extend-narrow.postlegal.mir
@@ -348,14 +348,14 @@ body:             |
     ; GFX6-LABEL: name: do_not_shl_s32_zero_by_16_from_zext_s16
     ; GFX6: liveins: $vgpr0
     ; GFX6-NEXT: {{  $}}
-    ; GFX6-NEXT: %extend:_(s32) = G_CONSTANT i32 0
-    ; GFX6-NEXT: $vgpr0 = COPY %extend(s32)
+    ; GFX6-NEXT: %shl:_(s32) = G_CONSTANT i32 0
+    ; GFX6-NEXT: $vgpr0 = COPY %shl(s32)
     ;
     ; GFX9-LABEL: name: do_not_shl_s32_zero_by_16_from_zext_s16
     ; GFX9: liveins: $vgpr0
     ; GFX9-NEXT: {{  $}}
-    ; GFX9-NEXT: %extend:_(s32) = G_CONSTANT i32 0
-    ; GFX9-NEXT: $vgpr0 = COPY %extend(s32)
+    ; GFX9-NEXT: %shl:_(s32) = G_CONSTANT i32 0
+    ; GFX9-NEXT: $vgpr0 = COPY %shl(s32)
     %zero:_(s16) = G_CONSTANT i16 0
     %extend:_(s32) = G_ZEXT %zero:_(s16)
     %shiftamt:_(s16) = G_CONSTANT i16 16
@@ -374,15 +374,15 @@ body:             |
     ; GFX6-LABEL: name: do_not_shl_v2s32_zero_by_16_from_zext_v2s16
     ; GFX6: liveins: $vgpr0
     ; GFX6-NEXT: {{  $}}
-    ; GFX6-NEXT: %6:_(s32) = G_CONSTANT i32 0
-    ; GFX6-NEXT: %shl:_(<2 x s32>) = G_BUILD_VECTOR %6(s32), %6(s32)
+    ; GFX6-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX6-NEXT: %shl:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
     ; GFX6-NEXT: $vgpr0_vgpr1 = COPY %shl(<2 x s32>)
     ;
     ; GFX9-LABEL: name: do_not_shl_v2s32_zero_by_16_from_zext_v2s16
     ; GFX9: liveins: $vgpr0
     ; GFX9-NEXT: {{  $}}
-    ; GFX9-NEXT: %6:_(s32) = G_CONSTANT i32 0
-    ; GFX9-NEXT: %shl:_(<2 x s32>) = G_BUILD_VECTOR %6(s32), %6(s32)
+    ; GFX9-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+    ; GFX9-NEXT: %shl:_(<2 x s32>) = G_BUILD_VECTOR [[C]](s32), [[C]](s32)
     ; GFX9-NEXT: $vgpr0_vgpr1 = COPY %shl(<2 x s32>)
     %zero:_(s16) = G_CONSTANT i16 0
     %zerovector:_(<2 x s16>) = G_BUILD_VECTOR %zero, %zero:_(s16)

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/combine.mir b/llvm/test/CodeGen/RISCV/GlobalISel/combine.mir
index cd574945965b5..0d12e83354cb5 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/combine.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/combine.mir
@@ -17,3 +17,122 @@ body:             |
     $x10 = COPY %3(s64)
     PseudoRET implicit $x10
 ...
+
+---
+name:            binop_left_to_zero_shl
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_shl
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_SHL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_lshr
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_lshr
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_LSHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_ashr
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_ashr
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_ASHR %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_sdiv
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_sdiv
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_SDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_udiv
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_udiv
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_UDIV %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_srem
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_srem
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_SREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_urem
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_urem
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_UREM %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10
+...
+
+---
+name:            binop_left_to_zero_mul
+body:             |
+  bb.0:
+    ; RV64-LABEL: name: binop_left_to_zero_mul
+    ; RV64: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+    ; RV64-NEXT: $x10 = COPY [[C]](s64)
+    ; RV64-NEXT: PseudoRET implicit $x10
+    %0:_(s64) = G_CONSTANT i64 0
+    %1:_(s64) = COPY $x10
+    %2:_(s64) = G_MUL %0, %1
+    $x10 = COPY %2(s64)
+    PseudoRET implicit $x10

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
index 5f61ee2d02d24..7c0e91fc4a291 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/div-by-constant.ll
@@ -97,30 +97,28 @@ define i64 @udiv64_constant_no_add(i64 %a) nounwind {
 ; RV32-NEXT:    mul a5, a1, a4
 ; RV32-NEXT:    mul a6, a0, a2
 ; RV32-NEXT:    mulhu a7, a0, a4
-; RV32-NEXT:    mul t1, a1, a2
-; RV32-NEXT:    mulhu t2, a1, a4
+; RV32-NEXT:    mul t0, a1, a2
+; RV32-NEXT:    mulhu t1, a1, a4
 ; RV32-NEXT:    mulhu a0, a0, a2
 ; RV32-NEXT:    mulhu a1, a1, a2
 ; RV32-NEXT:    add a5, a5, a6
-; RV32-NEXT:    mv t0, t1
+; RV32-NEXT:    sltu a2, t0, t0
 ; RV32-NEXT:    mv a1, a1
-; RV32-NEXT:    sltu a4, a5, a6
+; RV32-NEXT:    sltiu a4, t0, 0
+; RV32-NEXT:    add t0, t0, t1
+; RV32-NEXT:    sltu a6, a5, a6
 ; RV32-NEXT:    add a5, a5, a7
-; RV32-NEXT:    sltu a6, t1, t1
-; RV32-NEXT:    sltiu t1, t1, 0
-; RV32-NEXT:    add t0, t0, t2
-; RV32-NEXT:    mv a1, a1
-; RV32-NEXT:    sltu a2, a5, a7
-; RV32-NEXT:    add a6, a6, t1
-; RV32-NEXT:    sltu a5, t0, t2
+; RV32-NEXT:    add a2, a2, a4
+; RV32-NEXT:    sltu a4, t0, t1
 ; RV32-NEXT:    add t0, t0, a0
 ; RV32-NEXT:    mv a1, a1
-; RV32-NEXT:    add a2, a4, a2
-; RV32-NEXT:    add a5, a6, a5
+; RV32-NEXT:    sltu a3, a5, a7
+; RV32-NEXT:    add a2, a2, a4
 ; RV32-NEXT:    sltu a0, t0, a0
-; RV32-NEXT:    add a0, a5, a0
-; RV32-NEXT:    add t0, t0, a2
-; RV32-NEXT:    sltu a2, t0, a2
+; RV32-NEXT:    add a3, a6, a3
+; RV32-NEXT:    add a0, a2, a0
+; RV32-NEXT:    add t0, t0, a3
+; RV32-NEXT:    sltu a2, t0, a3
 ; RV32-NEXT:    srli a3, t0, 2
 ; RV32-NEXT:    add a0, a0, a2
 ; RV32-NEXT:    add a1, a1, a0
@@ -154,44 +152,43 @@ define i64 @udiv64_constant_add(i64 %a) nounwind {
 ; RV32-NEXT:    addi a2, a2, 1171
 ; RV32-NEXT:    addi a3, a3, -1756
 ; RV32-NEXT:    mul a5, a1, a2
-; RV32-NEXT:    mul a6, a0, a3
+; RV32-NEXT:    mul a6, a1, a3
+; RV32-NEXT:    mulhu t0, a1, a3
+; RV32-NEXT:    sltu t1, a6, a6
+; RV32-NEXT:    mv a7, t0
+; RV32-NEXT:    sltiu t0, a6, 0
+; RV32-NEXT:    add t0, t1, t0
+; RV32-NEXT:    mul t1, a0, a3
+; RV32-NEXT:    mv a4, a7
 ; RV32-NEXT:    mulhu a7, a0, a2
-; RV32-NEXT:    mulhu t2, a1, a3
-; RV32-NEXT:    mv t1, t2
-; RV32-NEXT:    mv t1, t1
-; RV32-NEXT:    mul t2, a1, a3
 ; RV32-NEXT:    mulhu a2, a1, a2
 ; RV32-NEXT:    mulhu a3, a0, a3
-; RV32-NEXT:    add a5, a5, a6
-; RV32-NEXT:    mv t0, t2
-; RV32-NEXT:    sltu a6, a5, a6
+; RV32-NEXT:    add a5, a5, t1
+; RV32-NEXT:    add a6, a6, a2
+; RV32-NEXT:    sltu t1, a5, t1
 ; RV32-NEXT:    add a5, a5, a7
-; RV32-NEXT:    sltu t2, t2, t2
+; RV32-NEXT:    sltu a2, a6, a2
+; RV32-NEXT:    add a6, a6, a3
 ; RV32-NEXT:    sltu a5, a5, a7
-; RV32-NEXT:    sltiu a7, t0, 0
-; RV32-NEXT:    add t0, t0, a2
-; RV32-NEXT:    add a7, t2, a7
-; RV32-NEXT:    sltu a2, t0, a2
-; RV32-NEXT:    add t0, t0, a3
-; RV32-NEXT:    add a5, a6, a5
-; RV32-NEXT:    add a2, a7, a2
-; RV32-NEXT:    sltu a3, t0, a3
+; RV32-NEXT:    add a2, t0, a2
+; RV32-NEXT:    sltu a3, a6, a3
+; RV32-NEXT:    add a5, t1, a5
 ; RV32-NEXT:    add a2, a2, a3
-; RV32-NEXT:    add t0, t0, a5
-; RV32-NEXT:    sltu a3, t0, a5
-; RV32-NEXT:    sub a5, a0, t0
-; RV32-NEXT:    sltu a0, a0, t0
+; RV32-NEXT:    add a6, a6, a5
+; RV32-NEXT:    sltu a3, a6, a5
+; RV32-NEXT:    sub a5, a0, a6
+; RV32-NEXT:    sltu a0, a0, a6
 ; RV32-NEXT:    add a2, a2, a3
 ; RV32-NEXT:    sub a1, a1, a0
 ; RV32-NEXT:    srli a5, a5, 1
-; RV32-NEXT:    add a2, t1, a2
+; RV32-NEXT:    add a2, a4, a2
 ; RV32-NEXT:    sub a1, a1, a2
 ; RV32-NEXT:    slli a0, a1, 31
 ; RV32-NEXT:    srli a1, a1, 1
 ; RV32-NEXT:    or a0, a5, a0
 ; RV32-NEXT:    add a1, a1, a2
-; RV32-NEXT:    add a0, a0, t0
-; RV32-NEXT:    sltu a2, a0, t0
+; RV32-NEXT:    add a0, a0, a6
+; RV32-NEXT:    sltu a2, a0, a6
 ; RV32-NEXT:    srli a0, a0, 2
 ; RV32-NEXT:    add a1, a1, a2
 ; RV32-NEXT:    slli a2, a1, 30

