[llvm] [DAG] Fold (X +/- Y) & Y --> ~X & Y when Y is a power of 2 (or zero). (PR #181677)

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Mon Feb 16 15:19:21 PST 2026


https://github.com/RKSimon updated https://github.com/llvm/llvm-project/pull/181677

>From dd2884aaed552e754029d97c93dbd743ab3462f0 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Mon, 16 Feb 2026 14:48:13 +0000
Subject: [PATCH 1/2] [DAG] Fold (X +/- Y) & Y --> ~X & Y when Y is a power of
 2 (or zero).

Same as InstCombinerImpl::visitAnd

I'm still looking at the known-never-zero.ll changes - from the logs it looks like RISCV prefers a long shift+add sequence to a call, but the loss of the initial AND(X,NEG(X)) pattern somehow disrupts that lowering.

Alive2: https://alive2.llvm.org/ce/z/Khvs5H
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  7 +++++
 llvm/test/CodeGen/NVPTX/i1-ext-load.ll        |  7 ++---
 llvm/test/CodeGen/RISCV/idiv_large.ll         | 22 ++++++-------
 .../CodeGen/RISCV/rvv/known-never-zero.ll     | 31 +++++--------------
 llvm/test/CodeGen/X86/known-pow2.ll           | 20 +++++-------
 5 files changed, 37 insertions(+), 50 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index adc197069f7c3..766a4f7a8b2bc 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7970,6 +7970,13 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
       return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, X.getOperand(0));
   }
 
+  // (X +/- Y) & Y --> ~X & Y when Y is a power of 2 (or zero).
+  if (sd_match(N, m_And(m_Value(Y),
+                        m_OneUse(m_AnyOf(m_Add(m_Value(X), m_Deferred(Y)),
+                                         m_Sub(m_Value(X), m_Deferred(Y)))))) &&
+      DAG.isKnownToBeAPowerOfTwo(Y, /*OrZero=*/true))
+    return DAG.getNode(ISD::AND, DL, VT, DAG.getNOT(DL, X, VT), Y);
+
   // fold (and (sign_extend_inreg x, i16 to i32), 1) -> (and x, 1)
   // fold (and (sra)) -> (and (srl)) when possible.
   if (SimplifyDemandedBits(SDValue(N, 0)))
diff --git a/llvm/test/CodeGen/NVPTX/i1-ext-load.ll b/llvm/test/CodeGen/NVPTX/i1-ext-load.ll
index 3dceefb93a47d..cc3ed047f6b17 100644
--- a/llvm/test/CodeGen/NVPTX/i1-ext-load.ll
+++ b/llvm/test/CodeGen/NVPTX/i1-ext-load.ll
@@ -7,7 +7,7 @@ target triple = "nvptx-nvidia-cuda"
 
 define ptx_kernel void @foo(ptr noalias readonly %ptr, ptr noalias %retval) {
 ; CHECK-LABEL: foo(
-; CHECK:    .reg .b32 %r<4>;
+; CHECK:    .reg .b32 %r<3>;
 ; CHECK:    .reg .b64 %rd<5>;
 ; CHECK-EMPTY:
 ; CHECK:    ld.param.b64 %rd1, [foo_param_0];
@@ -15,9 +15,8 @@ define ptx_kernel void @foo(ptr noalias readonly %ptr, ptr noalias %retval) {
 ; CHECK:    ld.param.b64 %rd3, [foo_param_1];
 ; CHECK:    cvta.to.global.u64 %rd4, %rd3;
 ; CHECK:    ld.global.nc.b8 %r1, [%rd2];
-; CHECK:    add.s32 %r2, %r1, 1;
-; CHECK:    and.b32 %r3, %r2, 1;
-; CHECK:    st.global.b32 [%rd4], %r3;
+; CHECK:    xor.b32 %r2, %r1, 1;
+; CHECK:    st.global.b32 [%rd4], %r2;
 ; CHECK:    ret;
   %ld = load i1, ptr %ptr, align 1
   %zext = zext i1 %ld to i32
diff --git a/llvm/test/CodeGen/RISCV/idiv_large.ll b/llvm/test/CodeGen/RISCV/idiv_large.ll
index 2ad605bc3ff9e..c75a807621b6c 100644
--- a/llvm/test/CodeGen/RISCV/idiv_large.ll
+++ b/llvm/test/CodeGen/RISCV/idiv_large.ll
@@ -362,7 +362,7 @@ define i65 @udiv_i65(i65 %x, i65 %y) nounwind {
 ; RV32-NEXT:    or t2, a5, a2
 ; RV32-NEXT:    seqz a2, a3
 ; RV32-NEXT:    sub a2, a4, a2
-; RV32-NEXT:    addi a5, t1, 1
+; RV32-NEXT:    not a5, t1
 ; RV32-NEXT:    andi a5, a5, 1
 ; RV32-NEXT:    andi s1, s1, 1
 ; RV32-NEXT:    srl t1, t4, a1
@@ -400,7 +400,7 @@ define i65 @udiv_i65(i65 %x, i65 %y) nounwind {
 ; RV32-NEXT:    andi s1, s0, 1
 ; RV32-NEXT:    sub t2, s2, t2
 ; RV32-NEXT:    add a6, a6, s3
-; RV32-NEXT:    addi a6, a6, 1
+; RV32-NEXT:    not a6, a6
 ; RV32-NEXT:    andi a6, a6, 1
 ; RV32-NEXT:    or t6, a1, t0
 ; RV32-NEXT:    or s2, t6, a6
@@ -1636,7 +1636,7 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    sub a5, a4, a2
 ; RV32-NEXT:    sw a5, 36(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sltu a2, a4, a2
-; RV32-NEXT:    addi a0, a0, 1
+; RV32-NEXT:    not a0, a0
 ; RV32-NEXT:    lw a4, 24(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    sub s6, a4, a2
 ; RV32-NEXT:    andi a0, a0, 1
@@ -1700,17 +1700,17 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV32-NEXT:    add a2, s1, a2
 ; RV32-NEXT:    sub t3, t3, a4
 ; RV32-NEXT:    or a4, a1, t6
-; RV32-NEXT:    addi a2, a2, 1
-; RV32-NEXT:    or a5, t2, t3
-; RV32-NEXT:    andi s1, a2, 1
-; RV32-NEXT:    or a4, a4, a5
-; RV32-NEXT:    or a4, a4, s1
+; RV32-NEXT:    not s1, a2
+; RV32-NEXT:    or a2, t2, t3
+; RV32-NEXT:    andi s1, s1, 1
+; RV32-NEXT:    or a2, a4, a2
+; RV32-NEXT:    or a2, a2, s1
 ; RV32-NEXT:    sub a3, s10, a3
 ; RV32-NEXT:    sw zero, 48(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw zero, 44(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    sw zero, 40(sp) # 4-byte Folded Spill
 ; RV32-NEXT:    li s7, 0
-; RV32-NEXT:    beqz a4, .LBB3_56
+; RV32-NEXT:    beqz a2, .LBB3_56
 ; RV32-NEXT:  .LBB3_45: # %udiv-do-while
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
 ; RV32-NEXT:    srli a2, ra, 31
@@ -2185,7 +2185,7 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV64-NEXT:    or t3, a5, a2
 ; RV64-NEXT:    seqz a2, a3
 ; RV64-NEXT:    sub a2, a4, a2
-; RV64-NEXT:    addi a5, t1, 1
+; RV64-NEXT:    not a5, t1
 ; RV64-NEXT:    andi a5, a5, 1
 ; RV64-NEXT:    andi s1, s1, 1
 ; RV64-NEXT:    srl t1, t4, a1
@@ -2223,7 +2223,7 @@ define i129 @udiv_i129(i129 %x, i129 %y) nounwind {
 ; RV64-NEXT:    andi s1, s0, 1
 ; RV64-NEXT:    sub t3, s2, t3
 ; RV64-NEXT:    add a6, a6, s3
-; RV64-NEXT:    addi a6, a6, 1
+; RV64-NEXT:    not a6, a6
 ; RV64-NEXT:    andi a6, a6, 1
 ; RV64-NEXT:    or t6, a1, t0
 ; RV64-NEXT:    or s2, t6, a6
diff --git a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
index 749b2041aa63d..9e23414575ca2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
@@ -6,38 +6,23 @@
 ; known never zero.
 
 ; Even without vscale_range, vscale is always guaranteed to be non-zero.
-define i32 @vscale_known_nonzero() {
+define i32 @vscale_known_nonzero() nounwind {
 ; CHECK-LABEL: vscale_known_nonzero:
 ; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi sp, sp, -16
+; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    csrr a0, vlenb
+; CHECK-NEXT:    lui a1, 30667
 ; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    neg a1, a0
-; CHECK-NEXT:    and a0, a0, a1
-; CHECK-NEXT:    slli a1, a0, 6
-; CHECK-NEXT:    slli a2, a0, 8
-; CHECK-NEXT:    slli a3, a0, 10
-; CHECK-NEXT:    slli a4, a0, 12
-; CHECK-NEXT:    add a1, a1, a2
-; CHECK-NEXT:    slli a2, a0, 16
-; CHECK-NEXT:    sub a3, a3, a4
-; CHECK-NEXT:    slli a4, a0, 18
-; CHECK-NEXT:    sub a2, a2, a4
-; CHECK-NEXT:    slli a4, a0, 4
-; CHECK-NEXT:    sub a4, a0, a4
-; CHECK-NEXT:    add a1, a4, a1
-; CHECK-NEXT:    slli a4, a0, 14
-; CHECK-NEXT:    sub a3, a3, a4
-; CHECK-NEXT:    slli a4, a0, 23
-; CHECK-NEXT:    sub a2, a2, a4
-; CHECK-NEXT:    slli a0, a0, 27
-; CHECK-NEXT:    add a1, a1, a3
-; CHECK-NEXT:    add a0, a2, a0
-; CHECK-NEXT:    add a0, a1, a0
+; CHECK-NEXT:    addi a1, a1, 1329
+; CHECK-NEXT:    call __muldi3
 ; CHECK-NEXT:    srliw a0, a0, 27
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI0_0)
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    lbu a0, 0(a0)
+; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
+; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %x = call i32 @llvm.vscale()
   %r = call i32 @llvm.cttz.i32(i32 %x, i1 false)
diff --git a/llvm/test/CodeGen/X86/known-pow2.ll b/llvm/test/CodeGen/X86/known-pow2.ll
index 09ceaf8eca3ac..868e4b11ef5a9 100644
--- a/llvm/test/CodeGen/X86/known-pow2.ll
+++ b/llvm/test/CodeGen/X86/known-pow2.ll
@@ -760,14 +760,12 @@ define i1 @pow2_and(i32 %x, i32 %y) {
 ; CHECK-LABEL: pow2_and:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
-; CHECK-NEXT:    movl $4, %eax
+; CHECK-NEXT:    movl %edi, %eax
 ; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT:    shll %cl, %eax
-; CHECK-NEXT:    movl %eax, %ecx
-; CHECK-NEXT:    negl %ecx
-; CHECK-NEXT:    andl %eax, %ecx
-; CHECK-NEXT:    testl %ecx, %edi
-; CHECK-NEXT:    setne %al
+; CHECK-NEXT:    shrl %cl, %eax
+; CHECK-NEXT:    andl $4, %eax
+; CHECK-NEXT:    shrl $2, %eax
+; CHECK-NEXT:    # kill: def $al killed $al killed $eax
 ; CHECK-NEXT:    retq
   %yy = shl nuw nsw i32 4, %y
   %nyy = sub i32 0, %yy
@@ -804,13 +802,11 @@ define i1 @pow2_and_fail1(i32 %x, i32 %y) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    movl %esi, %ecx
 ; CHECK-NEXT:    movl $1, %eax
-; CHECK-NEXT:    movl $1, %edx
 ; CHECK-NEXT:    # kill: def $cl killed $cl killed $ecx
-; CHECK-NEXT:    shll %cl, %edx
-; CHECK-NEXT:    subl %edx, %eax
-; CHECK-NEXT:    andl %edx, %eax
+; CHECK-NEXT:    shll %cl, %eax
 ; CHECK-NEXT:    notl %edi
-; CHECK-NEXT:    testl %edi, %eax
+; CHECK-NEXT:    andl %eax, %edi
+; CHECK-NEXT:    testl $-2, %edi
 ; CHECK-NEXT:    sete %al
 ; CHECK-NEXT:    retq
   %yy = shl i32 1, %y

>From ad38ffad1d803c80686ce89f78d8da6bd2d4c651 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim <llvm-dev at redking.me.uk>
Date: Mon, 16 Feb 2026 23:18:52 +0000
Subject: [PATCH 2/2] Limit sub(x, vscale(c)) -> add(x, vscale(-c)) fold to
 targets with good ISD::MUL support

Don't lose a pow2 constant
---
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  5 ++-
 .../CodeGen/RISCV/rvv/known-never-zero.ll     | 31 +++++++++++++------
 2 files changed, 26 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 766a4f7a8b2bc..10ee289334ae5 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -4487,9 +4487,12 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
   }
 
   // canonicalize (sub X, (vscale * C)) to (add X, (vscale * -C))
+  // avoid if ISD::MUL handling is poor and ISD::SHL isn't an option.
   if (N1.getOpcode() == ISD::VSCALE && N1.hasOneUse()) {
     const APInt &IntVal = N1.getConstantOperandAPInt(0);
-    return DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getVScale(DL, VT, -IntVal));
+    if (!IntVal.isPowerOf2() ||
+        hasOperation(ISD::MUL, N1.getOperand(0).getValueType()))
+      return DAG.getNode(ISD::ADD, DL, VT, N0, DAG.getVScale(DL, VT, -IntVal));
   }
 
   // canonicalize (sub X, step_vector(C)) to (add X, step_vector(-C))
diff --git a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
index 9e23414575ca2..9687dced7eb2e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/known-never-zero.ll
@@ -6,23 +6,36 @@
 ; known never zero.
 
 ; Even without vscale_range, vscale is always guaranteed to be non-zero.
-define i32 @vscale_known_nonzero() nounwind {
+define i32 @vscale_known_nonzero() {
 ; CHECK-LABEL: vscale_known_nonzero:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    lui a1, 30667
-; CHECK-NEXT:    srli a0, a0, 3
-; CHECK-NEXT:    addi a1, a1, 1329
-; CHECK-NEXT:    call __muldi3
+; CHECK-NEXT:    srli a1, a0, 3
+; CHECK-NEXT:    slli a2, a0, 1
+; CHECK-NEXT:    slli a3, a0, 3
+; CHECK-NEXT:    slli a4, a0, 5
+; CHECK-NEXT:    slli a5, a0, 7
+; CHECK-NEXT:    sub a1, a1, a2
+; CHECK-NEXT:    slli a2, a0, 9
+; CHECK-NEXT:    add a3, a3, a4
+; CHECK-NEXT:    slli a4, a0, 13
+; CHECK-NEXT:    sub a5, a5, a2
+; CHECK-NEXT:    slli a2, a0, 15
+; CHECK-NEXT:    sub a4, a4, a2
+; CHECK-NEXT:    add a1, a1, a3
+; CHECK-NEXT:    slli a2, a0, 11
+; CHECK-NEXT:    sub a5, a5, a2
+; CHECK-NEXT:    slli a2, a0, 20
+; CHECK-NEXT:    sub a4, a4, a2
+; CHECK-NEXT:    slli a0, a0, 24
+; CHECK-NEXT:    add a1, a1, a5
+; CHECK-NEXT:    add a0, a4, a0
+; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    srliw a0, a0, 27
 ; CHECK-NEXT:    lui a1, %hi(.LCPI0_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI0_0)
 ; CHECK-NEXT:    add a0, a1, a0
 ; CHECK-NEXT:    lbu a0, 0(a0)
-; CHECK-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
-; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %x = call i32 @llvm.vscale()
   %r = call i32 @llvm.cttz.i32(i32 %x, i1 false)



More information about the llvm-commits mailing list