[llvm] c4b1557 - [DAG] Fold (setcc ((x | x >> c0 | ...) & mask)) sequences (#146054)

via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 30 01:27:23 PDT 2025


Author: Pierre van Houtryve
Date: 2025-07-30T10:27:19+02:00
New Revision: c4b155709714fb3381049b6d523c1f518dc363f5

URL: https://github.com/llvm/llvm-project/commit/c4b155709714fb3381049b6d523c1f518dc363f5
DIFF: https://github.com/llvm/llvm-project/commit/c4b155709714fb3381049b6d523c1f518dc363f5.diff

LOG: [DAG] Fold (setcc ((x | x >> c0 | ...) & mask)) sequences (#146054)

Fold sequences where we extract several contiguous bit ranges from a
value, OR them together into the low bits, and then check whether the
low bits are zero or not.

Usually the `and`s would be on the outside (the leaves) of the
expression, but the DAG canonicalizes them into a single `and` at the
root of the expression.

I put this in the DAGCombiner instead of the target combiner because
this is a generic, valid transform that is also fairly niche, so I
think there is little risk of a combine loop.

See #136727
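
For illustration only (not part of the commit), below is a minimal
standalone C++ sketch of the bit identity this combine relies on, using
the mask and shift amounts from the basic_eq_i16_3x5 test added here.
The names Mask and PartsMask and the exhaustive check are illustrative,
not taken from the patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Low 5 bits, extracted at shift amounts 0, 5 and 10.
      const uint32_t Mask = 0x1f;
      // Same accumulation matchMergedBFX performs: PartsMask |= Mask << ShiftAmt.
      const uint32_t PartsMask = Mask | (Mask << 5) | (Mask << 10); // == 0x7fff
      for (uint32_t X = 0; X <= 0xffff; ++X) {
        // Before the fold: OR the extracted parts into the low bits, then test them.
        bool Before = (((X | (X >> 5) | (X >> 10)) & Mask) == 0);
        // After the fold: test the source directly against the merged mask.
        bool After = ((X & PartsMask) == 0);
        if (Before != After) {
          std::printf("mismatch at x = %#x\n", X);
          return 1;
        }
      }
      std::puts("identity holds for all 16-bit values");
      return 0;
    }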

Added: 
    llvm/test/CodeGen/AMDGPU/merged-bfx-opt.ll

Modified: 
    llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
    llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 20b96f5e1bc00..a43020ee62281 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -28982,13 +28982,100 @@ SDValue DAGCombiner::SimplifySelectCC(const SDLoc &DL, SDValue N0, SDValue N1,
   return SDValue();
 }
 
+static SDValue matchMergedBFX(SDValue Root, SelectionDAG &DAG,
+                              const TargetLowering &TLI) {
+  // Match a pattern such as:
+  //  (X | (X >> C0) | (X >> C1) | ...) & Mask
+  // This extracts contiguous parts of X and ORs them together before comparing.
+  // We can optimize this so that we directly check (X & SomeMask) instead,
+  // eliminating the shifts.
+
+  EVT VT = Root.getValueType();
+
+  // TODO: Support vectors?
+  if (!VT.isScalarInteger() || Root.getOpcode() != ISD::AND)
+    return SDValue();
+
+  SDValue N0 = Root.getOperand(0);
+  SDValue N1 = Root.getOperand(1);
+
+  if (N0.getOpcode() != ISD::OR || !isa<ConstantSDNode>(N1))
+    return SDValue();
+
+  APInt RootMask = cast<ConstantSDNode>(N1)->getAsAPIntVal();
+
+  SDValue Src;
+  const auto IsSrc = [&](SDValue V) {
+    if (!Src) {
+      Src = V;
+      return true;
+    }
+
+    return Src == V;
+  };
+
+  SmallVector<SDValue> Worklist = {N0};
+  APInt PartsMask(VT.getSizeInBits(), 0);
+  while (!Worklist.empty()) {
+    SDValue V = Worklist.pop_back_val();
+    if (!V.hasOneUse() && (Src && Src != V))
+      return SDValue();
+
+    if (V.getOpcode() == ISD::OR) {
+      Worklist.push_back(V.getOperand(0));
+      Worklist.push_back(V.getOperand(1));
+      continue;
+    }
+
+    if (V.getOpcode() == ISD::SRL) {
+      SDValue ShiftSrc = V.getOperand(0);
+      SDValue ShiftAmt = V.getOperand(1);
+
+      if (!IsSrc(ShiftSrc) || !isa<ConstantSDNode>(ShiftAmt))
+        return SDValue();
+
+      auto ShiftAmtVal = cast<ConstantSDNode>(ShiftAmt)->getAsZExtVal();
+      if (ShiftAmtVal > RootMask.getBitWidth())
+        return SDValue();
+
+      PartsMask |= (RootMask << ShiftAmtVal);
+      continue;
+    }
+
+    if (IsSrc(V)) {
+      PartsMask |= RootMask;
+      continue;
+    }
+
+    return SDValue();
+  }
+
+  if (!Src)
+    return SDValue();
+
+  SDLoc DL(Root);
+  return DAG.getNode(ISD::AND, DL, VT,
+                     {Src, DAG.getConstant(PartsMask, DL, VT)});
+}
+
 /// This is a stub for TargetLowering::SimplifySetCC.
 SDValue DAGCombiner::SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                                    ISD::CondCode Cond, const SDLoc &DL,
                                    bool foldBooleans) {
   TargetLowering::DAGCombinerInfo
     DagCombineInfo(DAG, Level, false, this);
-  return TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL);
+  if (SDValue C =
+          TLI.SimplifySetCC(VT, N0, N1, Cond, foldBooleans, DagCombineInfo, DL))
+    return C;
+
+  if (ISD::isIntEqualitySetCC(Cond) && N0.getOpcode() == ISD::AND &&
+      isNullConstant(N1)) {
+
+    if (SDValue Res = matchMergedBFX(N0, DAG, TLI))
+      return DAG.getSetCC(DL, VT, Res, N1, Cond);
+  }
+
+  return SDValue();
 }
 
 /// Given an ISD::SDIV node expressing a divide by constant, return

diff --git a/llvm/test/CodeGen/AMDGPU/merged-bfx-opt.ll b/llvm/test/CodeGen/AMDGPU/merged-bfx-opt.ll
new file mode 100644
index 0000000000000..144cb0d7f2b8b
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/merged-bfx-opt.ll
@@ -0,0 +1,123 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -O3 -mtriple=amdgcn -mcpu=fiji %s -o - | FileCheck %s
+
+define i1 @basic_eq_i16_3x5(i16 %arg) {
+; CHECK-LABEL: basic_eq_i16_3x5:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; CHECK-NEXT:    v_cmp_eq_u16_e32 vcc, 0, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = and i16 %arg, 31
+  %sh5 = lshr i16 %arg, 5
+  %b = and i16 %sh5, 31
+  %or = or i16 %a, %b
+  %sh10 = lshr i16 %arg, 10
+  %c = and i16 %sh10, 31
+  %or1 = or i16 %or, %c
+  %cmp = icmp eq i16 %or1, 0
+  ret i1 %cmp
+}
+
+define i1 @basic_eq_i32_3x5(i32 %arg) {
+; CHECK-LABEL: basic_eq_i32_3x5:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = and i32 %arg, 31
+  %sh5 = lshr i32 %arg, 5
+  %b = and i32 %sh5, 31
+  %or = or i32 %a, %b
+  %sh10 = lshr i32 %arg, 10
+  %c = and i32 %sh10, 31
+  %or1 = or i32 %or, %c
+  %cmp = icmp eq i32 %or1, 0
+  ret i1 %cmp
+}
+
+define i1 @basic_eq_i64_3x5(i64 %arg) {
+; CHECK-LABEL: basic_eq_i64_3x5:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; CHECK-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = and i64 %arg, 31
+  %sh5 = lshr i64 %arg, 5
+  %b = and i64 %sh5, 31
+  %or = or i64 %a, %b
+  %sh10 = lshr i64 %arg, 10
+  %c = and i64 %sh10, 31
+  %or1 = or i64 %or, %c
+  %cmp = icmp eq i64 %or1, 0
+  ret i1 %cmp
+}
+
+define i1 @basic_ne_i32_3x5(i32 %arg) {
+; CHECK-LABEL: basic_ne_i32_3x5:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x7fff, v0
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = and i32 %arg, 31
+  %sh5 = lshr i32 %arg, 5
+  %b = and i32 %sh5, 31
+  %or = or i32 %a, %b
+  %sh10 = lshr i32 %arg, 10
+  %c = and i32 %sh10, 31
+  %or1 = or i32 %or, %c
+  %cmp = icmp ne i32 %or1, 0
+  ret i1 %cmp
+}
+
+define i1 @eq_i32_3x5_holes_in_mask(i32 %arg) {
+; CHECK-LABEL: eq_i32_3x5_holes_in_mask:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x7f9f, v0
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %a = and i32 %arg, 31
+  %sh5 = lshr i32 %arg, 7
+  %b = and i32 %sh5, 31
+  %or = or i32 %a, %b
+  %sh10 = lshr i32 %arg, 10
+  %c = and i32 %sh10, 31
+  %or1 = or i32 %or, %c
+  %cmp = icmp ne i32 %or1, 0
+  ret i1 %cmp
+}
+
+define i1 @eq_i32_3x5_all_shifted(i32 %arg) {
+; CHECK-LABEL: eq_i32_3x5_all_shifted:
+; CHECK:       ; %bb.0: ; %entry
+; CHECK-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT:    v_and_b32_e32 v0, 0x7ffc, v0
+; CHECK-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
+; CHECK-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
+; CHECK-NEXT:    s_setpc_b64 s[30:31]
+entry:
+  %sh2 = lshr i32 %arg, 2
+  %a = and i32 %sh2, 31
+  %sh5 = lshr i32 %arg, 7
+  %b = and i32 %sh5, 31
+  %or = or i32 %a, %b
+  %sh10 = lshr i32 %arg, 10
+  %c = and i32 %sh10, 31
+  %or1 = or i32 %or, %c
+  %cmp = icmp ne i32 %or1, 0
+  ret i1 %cmp
+}

diff --git a/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll b/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll
index 07c4aeb1ac7df..64d055bc40e98 100644
--- a/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll
+++ b/llvm/test/CodeGen/AMDGPU/workitem-intrinsic-opts.ll
@@ -12,11 +12,7 @@ define i1 @workitem_zero() {
 ; DAGISEL-GFX8-LABEL: workitem_zero:
 ; DAGISEL-GFX8:       ; %bb.0: ; %entry
 ; DAGISEL-GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX8-NEXT:    v_lshrrev_b32_e32 v1, 10, v31
-; DAGISEL-GFX8-NEXT:    v_lshrrev_b32_e32 v0, 20, v31
-; DAGISEL-GFX8-NEXT:    v_or_b32_e32 v1, v31, v1
-; DAGISEL-GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
-; DAGISEL-GFX8-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; DAGISEL-GFX8-NEXT:    v_and_b32_e32 v0, 0x3fffffff, v31
 ; DAGISEL-GFX8-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; DAGISEL-GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; DAGISEL-GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -24,10 +20,7 @@ define i1 @workitem_zero() {
 ; DAGISEL-GFX942-LABEL: workitem_zero:
 ; DAGISEL-GFX942:       ; %bb.0: ; %entry
 ; DAGISEL-GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX942-NEXT:    v_lshrrev_b32_e32 v0, 20, v31
-; DAGISEL-GFX942-NEXT:    v_lshrrev_b32_e32 v1, 10, v31
-; DAGISEL-GFX942-NEXT:    v_or3_b32 v0, v31, v1, v0
-; DAGISEL-GFX942-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; DAGISEL-GFX942-NEXT:    v_and_b32_e32 v0, 0x3fffffff, v31
 ; DAGISEL-GFX942-NEXT:    v_cmp_eq_u32_e32 vcc, 0, v0
 ; DAGISEL-GFX942-NEXT:    s_nop 1
 ; DAGISEL-GFX942-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
@@ -40,11 +33,7 @@ define i1 @workitem_zero() {
 ; DAGISEL-GFX12-NEXT:    s_wait_samplecnt 0x0
 ; DAGISEL-GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; DAGISEL-GFX12-NEXT:    s_wait_kmcnt 0x0
-; DAGISEL-GFX12-NEXT:    v_lshrrev_b32_e32 v0, 20, v31
-; DAGISEL-GFX12-NEXT:    v_lshrrev_b32_e32 v1, 10, v31
-; DAGISEL-GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; DAGISEL-GFX12-NEXT:    v_or3_b32 v0, v31, v1, v0
-; DAGISEL-GFX12-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; DAGISEL-GFX12-NEXT:    v_and_b32_e32 v0, 0x3fffffff, v31
 ; DAGISEL-GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; DAGISEL-GFX12-NEXT:    v_cmp_eq_u32_e32 vcc_lo, 0, v0
 ; DAGISEL-GFX12-NEXT:    s_wait_alu 0xfffd
@@ -106,11 +95,7 @@ define i1 @workitem_nonzero() {
 ; DAGISEL-GFX8-LABEL: workitem_nonzero:
 ; DAGISEL-GFX8:       ; %bb.0: ; %entry
 ; DAGISEL-GFX8-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX8-NEXT:    v_lshrrev_b32_e32 v1, 10, v31
-; DAGISEL-GFX8-NEXT:    v_lshrrev_b32_e32 v0, 20, v31
-; DAGISEL-GFX8-NEXT:    v_or_b32_e32 v1, v31, v1
-; DAGISEL-GFX8-NEXT:    v_or_b32_e32 v0, v1, v0
-; DAGISEL-GFX8-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; DAGISEL-GFX8-NEXT:    v_and_b32_e32 v0, 0x3fffffff, v31
 ; DAGISEL-GFX8-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; DAGISEL-GFX8-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
 ; DAGISEL-GFX8-NEXT:    s_setpc_b64 s[30:31]
@@ -118,10 +103,7 @@ define i1 @workitem_nonzero() {
 ; DAGISEL-GFX942-LABEL: workitem_nonzero:
 ; DAGISEL-GFX942:       ; %bb.0: ; %entry
 ; DAGISEL-GFX942-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; DAGISEL-GFX942-NEXT:    v_lshrrev_b32_e32 v0, 20, v31
-; DAGISEL-GFX942-NEXT:    v_lshrrev_b32_e32 v1, 10, v31
-; DAGISEL-GFX942-NEXT:    v_or3_b32 v0, v31, v1, v0
-; DAGISEL-GFX942-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; DAGISEL-GFX942-NEXT:    v_and_b32_e32 v0, 0x3fffffff, v31
 ; DAGISEL-GFX942-NEXT:    v_cmp_ne_u32_e32 vcc, 0, v0
 ; DAGISEL-GFX942-NEXT:    s_nop 1
 ; DAGISEL-GFX942-NEXT:    v_cndmask_b32_e64 v0, 0, 1, vcc
@@ -134,11 +116,7 @@ define i1 @workitem_nonzero() {
 ; DAGISEL-GFX12-NEXT:    s_wait_samplecnt 0x0
 ; DAGISEL-GFX12-NEXT:    s_wait_bvhcnt 0x0
 ; DAGISEL-GFX12-NEXT:    s_wait_kmcnt 0x0
-; DAGISEL-GFX12-NEXT:    v_lshrrev_b32_e32 v0, 20, v31
-; DAGISEL-GFX12-NEXT:    v_lshrrev_b32_e32 v1, 10, v31
-; DAGISEL-GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
-; DAGISEL-GFX12-NEXT:    v_or3_b32 v0, v31, v1, v0
-; DAGISEL-GFX12-NEXT:    v_and_b32_e32 v0, 0x3ff, v0
+; DAGISEL-GFX12-NEXT:    v_and_b32_e32 v0, 0x3fffffff, v31
 ; DAGISEL-GFX12-NEXT:    s_delay_alu instid0(VALU_DEP_1)
 ; DAGISEL-GFX12-NEXT:    v_cmp_ne_u32_e32 vcc_lo, 0, v0
 ; DAGISEL-GFX12-NEXT:    s_wait_alu 0xfffd

