[llvm] [InstCombine] Fold select(X >s 0, 0, -X) | smax(X, 0) to abs(X) (PR #165200)

Wenju He via llvm-commits <llvm-commits at lists.llvm.org>
Sun Oct 26 23:33:02 PDT 2025


https://github.com/wenju-he updated https://github.com/llvm/llvm-project/pull/165200

From 4629547485d5b0c4f0a0e3e2e026222bbb654c7a Mon Sep 17 00:00:00 2001
From: Wenju He <wenju.he at intel.com>
Date: Mon, 27 Oct 2025 05:41:34 +0100
Subject: [PATCH 1/2] [InstCombine] Fold select(X >s 0, 0, -X) | smax(X, 0) to
 abs(X)

The IR pattern arises when compiling OpenCL code such as:
  __builtin_astype(x > (uchar2)(0) ? x : -x, uchar2);
where smax is created by foldSelectInstWithICmp + canonicalizeSPF.

The smax can also come from a direct call to __builtin_elementwise_max:
  int c = b > (int)(0) ? (int)(0) : -b;
  int d = __builtin_elementwise_max(b, (int)(0));
  *a = c | d;
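
For illustration, a minimal scalar sketch of the resulting fold (value
names are illustrative, not taken from the tests below):
  ; before
  %cmp = icmp sgt i32 %x, 0
  %neg = sub i32 0, %x
  %sel = select i1 %cmp, i32 0, i32 %neg
  %max = call i32 @llvm.smax.i32(i32 %x, i32 0)
  %or  = or i32 %sel, %max
  ; after
  %or  = call i32 @llvm.abs.i32(i32 %x, i1 false)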
---
 .../InstCombine/InstCombineAndOrXor.cpp       | 18 ++++++++++++
 llvm/test/Transforms/InstCombine/or.ll        | 28 +++++++++++++++++++
 2 files changed, 46 insertions(+)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 3ddf182149e57..4e863ca2c6dfd 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -3997,6 +3997,20 @@ static Value *foldOrUnsignedUMulOverflowICmp(BinaryOperator &I,
   return nullptr;
 }
 
+// Fold select(X >s 0, 0, -X) | smax(X, 0) --> abs(X)
+static Value *foldOrOfSelectSmaxToAbs(BinaryOperator &I,
+                                      InstCombiner::BuilderTy &Builder) {
+  CmpPredicate Pred;
+  Value *X;
+  if (match(&I, m_c_Or(m_Select(m_ICmp(Pred, m_Value(X), m_ZeroInt()),
+                                m_ZeroInt(), m_Sub(m_ZeroInt(), m_Deferred(X))),
+                       m_OneUse(m_Intrinsic<Intrinsic::smax>(m_Deferred(X),
+                                                             m_ZeroInt())))) &&
+      Pred == ICmpInst::ICMP_SGT)
+    return Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
+  return nullptr;
+}
+
 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
 // here. We should standardize that construct where it is needed or choose some
 // other way to ensure that commutated variants of patterns are not missed.
@@ -4545,6 +4559,10 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
     if (Value *V = SimplifyAddWithRemainder(I))
       return replaceInstUsesWith(I, V);
 
+  // select(X >s 0, 0, -X) | smax(X, 0) -> abs(X)
+  if (Value *Res = foldOrOfSelectSmaxToAbs(I, Builder))
+    return replaceInstUsesWith(I, Res);
+
   return nullptr;
 }
 
diff --git a/llvm/test/Transforms/InstCombine/or.ll b/llvm/test/Transforms/InstCombine/or.ll
index 6b090e982af0a..bbc79e8c16a56 100644
--- a/llvm/test/Transforms/InstCombine/or.ll
+++ b/llvm/test/Transforms/InstCombine/or.ll
@@ -2113,3 +2113,31 @@ define <4 x i32> @or_zext_nneg_minus_constant_splat(<4 x i8> %a) {
   %or = or <4 x i32> %zext, splat (i32 -9)
   ret <4 x i32> %or
 }
+
+define i8 @or_positive_minus_non_positive_to_abs(i8 noundef %0){
+; CHECK-LABEL: @or_positive_minus_non_positive_to_abs(
+; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.abs.i8(i8 [[TMP0:%.*]], i1 false)
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %2 = icmp sgt i8 %0, zeroinitializer
+  %3 = sext i1 %2 to i8
+  %4 = sub i8 zeroinitializer, %0
+  %5 = xor i8 %3, -1
+  %6 = and i8 %4, %5
+  %7 = and i8 %0, %3
+  %8 = or i8 %6, %7
+  ret i8 %8
+}
+
+define <2 x i8> @or_select_smax_to_abs(<2 x i8> %0){
+; CHECK-LABEL: @or_select_smax_to_abs(
+; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[TMP0:%.*]], i1 false)
+; CHECK-NEXT:    ret <2 x i8> [[TMP2]]
+;
+  %2 = icmp sgt <2 x i8> %0, zeroinitializer
+  %3 = sub <2 x i8> zeroinitializer, %0
+  %4 = select <2 x i1> %2, <2 x i8> zeroinitializer, <2 x i8> %3
+  %5 = tail call <2 x i8> @llvm.smax.v2i8(<2 x i8> %0, <2 x i8> zeroinitializer)
+  %6 = or <2 x i8> %4, %5
+  ret <2 x i8> %6
+}

From af62083f6139ac5c103e8342e8ed6a7c877edfb9 Mon Sep 17 00:00:00 2001
From: Wenju He <wenju.he at intel.com>
Date: Mon, 27 Oct 2025 07:32:30 +0100
Subject: [PATCH 2/2] use named values, add negative test with multiple uses
 of @llvm.smax

---
 llvm/test/Transforms/InstCombine/or.ll | 55 ++++++++++++++++++--------
 1 file changed, 39 insertions(+), 16 deletions(-)

diff --git a/llvm/test/Transforms/InstCombine/or.ll b/llvm/test/Transforms/InstCombine/or.ll
index bbc79e8c16a56..dcc8a712cd835 100644
--- a/llvm/test/Transforms/InstCombine/or.ll
+++ b/llvm/test/Transforms/InstCombine/or.ll
@@ -2114,30 +2114,53 @@ define <4 x i32> @or_zext_nneg_minus_constant_splat(<4 x i8> %a) {
   ret <4 x i32> %or
 }
 
-define i8 @or_positive_minus_non_positive_to_abs(i8 noundef %0){
+define i8 @or_positive_minus_non_positive_to_abs(i8 %a){
 ; CHECK-LABEL: @or_positive_minus_non_positive_to_abs(
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i8 @llvm.abs.i8(i8 [[TMP0:%.*]], i1 false)
 ; CHECK-NEXT:    ret i8 [[TMP2]]
 ;
-  %2 = icmp sgt i8 %0, zeroinitializer
-  %3 = sext i1 %2 to i8
-  %4 = sub i8 zeroinitializer, %0
-  %5 = xor i8 %3, -1
-  %6 = and i8 %4, %5
-  %7 = and i8 %0, %3
-  %8 = or i8 %6, %7
-  ret i8 %8
+  %b = icmp sgt i8 %a, zeroinitializer
+  %mask = sext i1 %b to i8
+  %neg = sub i8 zeroinitializer, %a
+  %mask_inv = xor i8 %mask, -1
+  %c = and i8 %neg, %mask_inv
+  %d = and i8 %a, %mask
+  %or = or i8 %c, %d
+  ret i8 %or
 }
 
-define <2 x i8> @or_select_smax_to_abs(<2 x i8> %0){
+define <2 x i8> @or_select_smax_to_abs(<2 x i8> %a){
 ; CHECK-LABEL: @or_select_smax_to_abs(
 ; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i8> @llvm.abs.v2i8(<2 x i8> [[TMP0:%.*]], i1 false)
 ; CHECK-NEXT:    ret <2 x i8> [[TMP2]]
 ;
-  %2 = icmp sgt <2 x i8> %0, zeroinitializer
-  %3 = sub <2 x i8> zeroinitializer, %0
-  %4 = select <2 x i1> %2, <2 x i8> zeroinitializer, <2 x i8> %3
-  %5 = tail call <2 x i8> @llvm.smax.v2i8(<2 x i8> %0, <2 x i8> zeroinitializer)
-  %6 = or <2 x i8> %4, %5
-  ret <2 x i8> %6
+  %sgt0 = icmp sgt <2 x i8> %a, zeroinitializer
+  %neg = sub <2 x i8> zeroinitializer, %a
+  %sel = select <2 x i1> %sgt0, <2 x i8> zeroinitializer, <2 x i8> %neg
+  %max = call <2 x i8> @llvm.smax.v2i8(<2 x i8> %a, <2 x i8> zeroinitializer)
+  %or = or <2 x i8> %sel, %max
+  ret <2 x i8> %or
+}
+
+declare <2 x i8> @llvm.abs.v2i8(<2 x i8>, i1)
+
+; negative test - %max has multiple uses, so %or is not folded to abs.
+
+define <2 x i8> @or_select_smax_multi_uses(<2 x i8> %a){
+; CHECK-LABEL: @or_select_smax_multi_uses(
+; CHECK-NEXT:    [[SGT0:%.*]] = icmp sgt <2 x i8> [[A:%.*]], zeroinitializer
+; CHECK-NEXT:    [[NEG:%.*]] = sub <2 x i8> zeroinitializer, [[A]]
+; CHECK-NEXT:    [[SEL:%.*]] = select <2 x i1> [[SGT0]], <2 x i8> zeroinitializer, <2 x i8> [[NEG]]
+; CHECK-NEXT:    [[MAX:%.*]] = call <2 x i8> @llvm.smax.v2i8(<2 x i8> [[A]], <2 x i8> zeroinitializer)
+; CHECK-NEXT:    [[OR:%.*]] = or <2 x i8> [[SEL]], [[MAX]]
+; CHECK-NEXT:    [[E:%.*]] = add <2 x i8> [[OR]], [[MAX]]
+; CHECK-NEXT:    ret <2 x i8> [[E]]
+;
+  %sgt0 = icmp sgt <2 x i8> %a, zeroinitializer
+  %neg = sub <2 x i8> zeroinitializer, %a
+  %sel = select <2 x i1> %sgt0, <2 x i8> zeroinitializer, <2 x i8> %neg
+  %max = call <2 x i8> @llvm.smax.v2i8(<2 x i8> %a, <2 x i8> zeroinitializer)
+  %or = or <2 x i8> %sel, %max
+  %e = add <2 x i8> %or, %max
+  ret <2 x i8> %e
 }


