[llvm] [GlobalISel] Fold `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)` (PR #181725)

via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 16 11:19:46 PST 2026


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-globalisel

Author: Osman Yasar (osmanyasar05)

<details>
<summary>Changes</summary>

Based on the suggestions in #<!-- -->140639, this PR adds the rewrite pattern `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)` for AND, OR, and XOR operations. This rewrite enables lowering to `ANDN`, `ORN`, and `XNOR` instructions.

Added new MIR tests in `combine-binop-neg.mir` for AArch64 to verify the new combine works for various commuted and uncommuted forms with AND, OR, and XOR and added new LLVM IR tests for RISC-V in `rv32zbb-zbkb.ll` to ensure the combine produces the expected `andn`, `orn`, and `xnor` instructions.

---
Full diff: https://github.com/llvm/llvm-project/pull/181725.diff


5 Files Affected:

- (modified) llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h (+3) 
- (modified) llvm/include/llvm/Target/GlobalISel/Combine.td (+9-1) 
- (modified) llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp (+62) 
- (added) llvm/test/CodeGen/AArch64/GlobalISel/combine-binop-neg.mir (+205) 
- (modified) llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll (+57) 


``````````diff
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
index da53005ed801e..33b0ebf686421 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/CombinerHelper.h
@@ -534,6 +534,9 @@ class CombinerHelper {
   void applySimplifyAddToSub(MachineInstr &MI,
                              std::tuple<Register, Register> &MatchInfo) const;
 
+  /// Fold `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)`
+  bool matchBinopWithNeg(MachineInstr &MI, BuildFnTy &MatchInfo) const;
+
   /// Match (logic_op (op x...), (op y...)) -> (op (logic_op x, y))
   bool matchHoistLogicOpWithSameOpcodeHands(
       MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const;
diff --git a/llvm/include/llvm/Target/GlobalISel/Combine.td b/llvm/include/llvm/Target/GlobalISel/Combine.td
index f5c940bffc8fb..15e5ab6f974fc 100644
--- a/llvm/include/llvm/Target/GlobalISel/Combine.td
+++ b/llvm/include/llvm/Target/GlobalISel/Combine.td
@@ -621,6 +621,14 @@ def binop_left_to_zero: GICombineRule<
   (apply (GIReplaceReg $dst, $zero))
 >;
 
+// Fold `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)`
+def binop_with_neg : GICombineRule<
+  (defs root:$root, build_fn_matchinfo:$matchinfo),
+  (match (wip_match_opcode G_AND, G_OR, G_XOR):$root,
+    [{ return Helper.matchBinopWithNeg(*${root}, ${matchinfo}); }]),
+  (apply [{ Helper.applyBuildFn(*${root}, ${matchinfo}); }])
+>;
+
 def urem_pow2_to_mask : GICombineRule<
   (defs root:$root),
   (match (wip_match_opcode G_UREM):$root,
@@ -2296,7 +2304,7 @@ def all_combines : GICombineGroup<[integer_reassoc_combines, trivial_combines,
     simplify_neg_minmax, combine_concat_vector,
     sext_trunc, zext_trunc, prefer_sign_combines, shuffle_combines,
     combine_use_vector_truncate, merge_combines, overflow_combines, 
-    truncsat_combines, lshr_of_trunc_of_lshr, ctls_combines, add_shift]>;
+    truncsat_combines, lshr_of_trunc_of_lshr, ctls_combines, add_shift, binop_with_neg]>;
 
 // A combine group used to for prelegalizer combiners at -O0. The combines in
 // this group have been selected based on experiments to balance code size and
diff --git a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
index b9273d388ea70..70e755b8352b0 100644
--- a/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/CombinerHelper.cpp
@@ -3185,6 +3185,68 @@ void CombinerHelper::applySimplifyAddToSub(
   MI.eraseFromParent();
 }
 
+bool CombinerHelper::matchBinopWithNeg(MachineInstr &MI,
+                                       BuildFnTy &MatchInfo) const {
+  // Fold `a bitwiseop (~b +/- c)` -> `a bitwiseop ~(b -/+ c)`
+  // Root MI is one of G_AND, G_OR, G_XOR.
+  // We also look for commuted forms of operations.
+
+  unsigned RootOpc = MI.getOpcode();
+  Register Dst = MI.getOperand(0).getReg();
+  LLT Ty = MRI.getType(Dst);
+
+  auto TryMatch = [&](Register MaybeInner, Register Other) -> bool {
+    MachineInstr *InnerDef = MRI.getVRegDef(MaybeInner);
+    if (!InnerDef)
+      return false;
+
+    unsigned InnerOpc = InnerDef->getOpcode();
+    if (InnerOpc != TargetOpcode::G_ADD && InnerOpc != TargetOpcode::G_SUB)
+      return false;
+
+    if (!MRI.hasOneNonDBGUse(MaybeInner))
+      return false;
+
+    Register InnerLHS = InnerDef->getOperand(1).getReg();
+    Register InnerRHS = InnerDef->getOperand(2).getReg();
+    Register NotSrc;
+    Register B, C;
+
+    // Check if either operand is ~b
+    if (mi_match(InnerLHS, MRI, m_Not(m_Reg(NotSrc)))) {
+      if (!MRI.hasOneNonDBGUse(InnerLHS))
+        return false;
+      B = NotSrc;
+      C = InnerRHS;
+    } else if (InnerOpc == TargetOpcode::G_ADD &&
+               mi_match(InnerRHS, MRI, m_Not(m_Reg(NotSrc)))) {
+      // The NOT may only be commuted for G_ADD: `c - ~b` equals `c + b + 1`,
+      // which is not `~(b +/- c)`, so for G_SUB the NOT must be the LHS.
+      if (!MRI.hasOneNonDBGUse(InnerRHS))
+        return false;
+      B = NotSrc;
+      C = InnerLHS;
+    } else {
+      return false;
+    }
+
+    // Flip add/sub
+    unsigned FlippedOpc = (InnerOpc == TargetOpcode::G_ADD)
+                              ? TargetOpcode::G_SUB
+                              : TargetOpcode::G_ADD;
+
+    Register A = Other;
+    MatchInfo = [=](MachineIRBuilder &Builder) {
+      auto NewInner = Builder.buildInstr(FlippedOpc, {Ty}, {B, C});
+      auto NewNot = Builder.buildNot(Ty, NewInner);
+      Builder.buildInstr(RootOpc, {Dst}, {A, NewNot});
+    };
+    return true;
+  };
+
+  Register LHS = MI.getOperand(1).getReg();
+  Register RHS = MI.getOperand(2).getReg();
+  // Check the commuted and uncommuted forms of the operation.
+  return TryMatch(LHS, RHS) || TryMatch(RHS, LHS);
+}
+
 bool CombinerHelper::matchHoistLogicOpWithSameOpcodeHands(
     MachineInstr &MI, InstructionStepsMatchInfo &MatchInfo) const {
   // Matches: logic (hand x, ...), (hand y, ...) -> hand (logic x, y), ...
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/combine-binop-neg.mir b/llvm/test/CodeGen/AArch64/GlobalISel/combine-binop-neg.mir
new file mode 100644
index 0000000000000..c3818a22e8d53
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/combine-binop-neg.mir
@@ -0,0 +1,205 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -run-pass=aarch64-prelegalizer-combiner -verify-machineinstrs -mtriple aarch64-unknown-unknown %s -o - | FileCheck %s
+
+---
+name:            binop_with_neg_or
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $x1, $x2, $x3
+
+    ; CHECK-LABEL: name: binop_with_neg_or
+    ; CHECK: liveins: $x0, $x1, $x2, $x3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x0
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %mone:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB %b, %c
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SUB]], %mone
+    ; CHECK-NEXT: %dst:_(s64) = G_OR %a, [[XOR]]
+    ; CHECK-NEXT: $x0 = COPY %dst(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %mone:_(s64) = G_CONSTANT i64 -1
+    %neg_y:_(s64) = G_XOR %mone, %b
+    %add:_(s64) = G_ADD %neg_y, %c
+    %dst:_(s64) = G_OR %a, %add
+    $x0 = COPY %dst
+    RET_ReallyLR implicit $x0
+...
+---
+name:            binop_with_neg_or_commuted
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $x1, $x2, $x3
+
+    ; CHECK-LABEL: name: binop_with_neg_or_commuted
+    ; CHECK: liveins: $x0, $x1, $x2, $x3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x0
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %mone:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB %b, %c
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SUB]], %mone
+    ; CHECK-NEXT: %dst:_(s64) = G_OR %a, [[XOR]]
+    ; CHECK-NEXT: $x0 = COPY %dst(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %mone:_(s64) = G_CONSTANT i64 -1
+    %neg_y:_(s64) = G_XOR %mone, %b
+    %add:_(s64) = G_ADD %neg_y, %c
+    %dst:_(s64) = G_OR %add, %a
+    $x0 = COPY %dst
+    RET_ReallyLR implicit $x0
+...
+---
+name:            binop_with_neg_xor
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $x1, $x2, $x3
+
+    ; CHECK-LABEL: name: binop_with_neg_xor
+    ; CHECK: liveins: $x0, $x1, $x2, $x3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x0
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %mone:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB %b, %c
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SUB]], %mone
+    ; CHECK-NEXT: %dst:_(s64) = G_XOR %a, [[XOR]]
+    ; CHECK-NEXT: $x0 = COPY %dst(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %mone:_(s64) = G_CONSTANT i64 -1
+    %neg_y:_(s64) = G_XOR %mone, %b
+    %add:_(s64) = G_ADD %neg_y, %c
+    %dst:_(s64) = G_XOR %a, %add
+    $x0 = COPY %dst
+    RET_ReallyLR implicit $x0
+...
+---
+name:            binop_with_neg_xor_commuted
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $x1, $x2, $x3
+
+    ; CHECK-LABEL: name: binop_with_neg_xor_commuted
+    ; CHECK: liveins: $x0, $x1, $x2, $x3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x0
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %mone:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB %b, %c
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SUB]], %mone
+    ; CHECK-NEXT: %dst:_(s64) = G_XOR %a, [[XOR]]
+    ; CHECK-NEXT: $x0 = COPY %dst(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %mone:_(s64) = G_CONSTANT i64 -1
+    %neg_y:_(s64) = G_XOR %mone, %b
+    %add:_(s64) = G_ADD %c, %neg_y
+    %dst:_(s64) = G_XOR %add, %a
+    $x0 = COPY %dst
+    RET_ReallyLR implicit $x0
+...
+---
+name:            binop_with_neg_and
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $x1, $x2, $x3
+
+    ; CHECK-LABEL: name: binop_with_neg_and
+    ; CHECK: liveins: $x0, $x1, $x2, $x3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x0
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %mone:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB %b, %c
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SUB]], %mone
+    ; CHECK-NEXT: %dst:_(s64) = G_AND %a, [[XOR]]
+    ; CHECK-NEXT: $x0 = COPY %dst(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %mone:_(s64) = G_CONSTANT i64 -1
+    %neg_y:_(s64) = G_XOR %mone, %b
+    %add:_(s64) = G_ADD %neg_y, %c
+    %dst:_(s64) = G_AND %a, %add
+    $x0 = COPY %dst
+    RET_ReallyLR implicit $x0
+...
+---
+name:            binop_with_neg_and_commuted_and
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $x1, $x2, $x3
+
+    ; CHECK-LABEL: name: binop_with_neg_and_commuted_and
+    ; CHECK: liveins: $x0, $x1, $x2, $x3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x0
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %mone:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB %b, %c
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SUB]], %mone
+    ; CHECK-NEXT: %dst:_(s64) = G_AND %a, [[XOR]]
+    ; CHECK-NEXT: $x0 = COPY %dst(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %mone:_(s64) = G_CONSTANT i64 -1
+    %neg_y:_(s64) = G_XOR %mone, %b
+    %add:_(s64) = G_ADD %c, %neg_y
+    %dst:_(s64) = G_AND %add, %a
+    $x0 = COPY %dst
+    RET_ReallyLR implicit $x0
+...
+---
+name:            binop_with_neg_and_commuted_add
+tracksRegLiveness: true
+body: |
+  bb.0:
+    liveins: $x0, $x1, $x2, $x3
+
+    ; CHECK-LABEL: name: binop_with_neg_and_commuted_add
+    ; CHECK: liveins: $x0, $x1, $x2, $x3
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %a:_(s64) = COPY $x0
+    ; CHECK-NEXT: %b:_(s64) = COPY $x1
+    ; CHECK-NEXT: %c:_(s64) = COPY $x2
+    ; CHECK-NEXT: %mone:_(s64) = G_CONSTANT i64 -1
+    ; CHECK-NEXT: [[SUB:%[0-9]+]]:_(s64) = G_SUB %b, %c
+    ; CHECK-NEXT: [[XOR:%[0-9]+]]:_(s64) = G_XOR [[SUB]], %mone
+    ; CHECK-NEXT: %dst:_(s64) = G_AND %a, [[XOR]]
+    ; CHECK-NEXT: $x0 = COPY %dst(s64)
+    ; CHECK-NEXT: RET_ReallyLR implicit $x0
+    %a:_(s64) = COPY $x0
+    %b:_(s64) = COPY $x1
+    %c:_(s64) = COPY $x2
+    %mone:_(s64) = G_CONSTANT i64 -1
+    %neg_y:_(s64) = G_XOR %mone, %b
+    %add:_(s64) = G_ADD %c, %neg_y
+    %dst:_(s64) = G_AND %a, %add
+    $x0 = COPY %dst
+    RET_ReallyLR implicit $x0
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
index 83cf228402295..757070eaff802 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/rv32zbb-zbkb.ll
@@ -389,3 +389,60 @@ define i16 @srai_i16(i16 %a) nounwind {
   %1 = ashr i16 %a, 9
   ret i16 %1
 }
+
+define i32 @binop_neg_and(i32 %a, i32 %b, i32 %c) {
+; RV32I-LABEL: binop_neg_and:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    and a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: binop_neg_and:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    add a1, a1, a2
+; RV32ZBB-ZBKB-NEXT:    andn a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
+  %not_b = xor i32 %b, -1
+  %sub = sub i32 %not_b, %c
+  %and = and i32 %a, %sub
+  ret i32 %and
+}
+
+define i32 @binop_neg_or(i32 %a, i32 %b, i32 %c) {
+; RV32I-LABEL: binop_neg_or:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    or a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: binop_neg_or:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    add a1, a1, a2
+; RV32ZBB-ZBKB-NEXT:    orn a0, a0, a1
+; RV32ZBB-ZBKB-NEXT:    ret
+  %not_b = xor i32 %b, -1
+  %sub = sub i32 %not_b, %c
+  %or = or i32 %a, %sub
+  ret i32 %or
+}
+
+define i32 @binop_neg_xor(i32 %a, i32 %b, i32 %c) {
+; RV32I-LABEL: binop_neg_xor:
+; RV32I:       # %bb.0:
+; RV32I-NEXT:    add a1, a1, a2
+; RV32I-NEXT:    not a1, a1
+; RV32I-NEXT:    xor a0, a0, a1
+; RV32I-NEXT:    ret
+;
+; RV32ZBB-ZBKB-LABEL: binop_neg_xor:
+; RV32ZBB-ZBKB:       # %bb.0:
+; RV32ZBB-ZBKB-NEXT:    add a1, a1, a2
+; RV32ZBB-ZBKB-NEXT:    xnor a0, a1, a0
+; RV32ZBB-ZBKB-NEXT:    ret
+  %not_b = xor i32 %b, -1
+  %sub = sub i32 %not_b, %c
+  %xor = xor i32 %a, %sub
+  ret i32 %xor
+}

``````````

</details>


https://github.com/llvm/llvm-project/pull/181725


More information about the llvm-commits mailing list