[llvm] 9cf3e31 - [InstCombine] Explicitly fold `~(~X >>u Y)` into `X >>s Y` (#75473)

via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 14 07:06:42 PST 2023


Author: Yingwei Zheng
Date: 2023-12-14T23:06:38+08:00
New Revision: 9cf3e31172b7b9966ce95d6a84ca09f8d0f6ebc1

URL: https://github.com/llvm/llvm-project/commit/9cf3e31172b7b9966ce95d6a84ca09f8d0f6ebc1
DIFF: https://github.com/llvm/llvm-project/commit/9cf3e31172b7b9966ce95d6a84ca09f8d0f6ebc1.diff

LOG: [InstCombine] Explicitly fold `~(~X >>u Y)` into `X >>s Y` (#75473)

Fixes #75369.

This patch explicitly folds `~(~X >>u Y)` into `X >>s Y` to fix assertion failure in #75369.

Added: 
    llvm/test/Transforms/InstCombine/pr75369.ll

Modified: 
    llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
    llvm/lib/Transforms/InstCombine/InstructionCombining.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 6002f599ca71a..31db1d3164b77 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -4280,6 +4280,12 @@ Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
     if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
       return BinaryOperator::CreateAShr(X, Y);
 
+    // Treat lshr with non-negative operand as ashr.
+    // ~(~X >>u Y) --> (X >>s Y) iff X is known negative
+    if (match(NotVal, m_LShr(m_Not(m_Value(X)), m_Value(Y))) &&
+        isKnownNegative(X, SQ.getWithInstruction(NotVal)))
+      return BinaryOperator::CreateAShr(X, Y);
+
     // Bit-hack form of a signbit test for iN type:
     // ~(X >>s (N - 1)) --> sext i1 (X > -1) to iN
     unsigned FullShift = Ty->getScalarSizeInBits() - 1;

diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index ba40ee7693636..6ac1fdb9252bf 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -2190,16 +2190,6 @@ Value *InstCombiner::getFreelyInvertedImpl(Value *V, bool WillInvertAllUses,
     return nullptr;
   }
 
-  // Treat lshr with non-negative operand as ashr.
-  if (match(V, m_LShr(m_Value(A), m_Value(B))) &&
-      isKnownNonNegative(A, SQ.getWithInstruction(cast<Instruction>(V)),
-                         Depth)) {
-    if (auto *AV = getFreelyInvertedImpl(A, A->hasOneUse(), Builder,
-                                         DoesConsume, Depth))
-      return Builder ? Builder->CreateAShr(AV, B) : NonNull;
-    return nullptr;
-  }
-
   Value *Cond;
   // LogicOps are special in that we canonicalize them at the cost of an
   // instruction.

diff --git a/llvm/test/Transforms/InstCombine/pr75369.ll b/llvm/test/Transforms/InstCombine/pr75369.ll
new file mode 100644
index 0000000000000..2f90753504b36
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/pr75369.ll
@@ -0,0 +1,36 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -passes=instcombine -S < %s | FileCheck %s
+
+define i32 @main(ptr %a, i8 %a0, i32 %conv, i8 %a1) {
+; CHECK-LABEL: define i32 @main(
+; CHECK-SAME: ptr [[A:%.*]], i8 [[A0:%.*]], i32 [[CONV:%.*]], i8 [[A1:%.*]]) {
+; CHECK-NEXT:    [[A3:%.*]] = trunc i32 [[CONV]] to i8
+; CHECK-NEXT:    [[OR11:%.*]] = or i8 [[A3]], [[A0]]
+; CHECK-NEXT:    store i8 [[OR11]], ptr [[A]], align 1
+; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i8 [[A1]], 0
+; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
+; CHECK-NEXT:    ret i32 [[CONV]]
+;
+  %conv1 = sext i8 %a1 to i32
+  %a2 = xor i32 %conv, 1
+  %or = or i32 %conv1, %conv
+  %not = xor i32 %or, -1
+  %shr = lshr i32 %not, 1
+  %add.neg3 = sub i32 %a2, %shr
+  %conv24 = trunc i32 %add.neg3 to i8
+  store i8 %conv24, ptr %a, align 1
+  %sext = shl i32 %conv, 0
+  %conv3 = ashr i32 %sext, 0
+  %a3 = trunc i32 %conv to i8
+  %conv5 = or i8 %a3, 0
+  %xor6 = xor i8 %conv5, 0
+  %xor816 = xor i8 %a0, 0
+  %a4 = xor i8 %xor816, 0
+  %or11 = or i8 %xor6, %a4
+  store i8 %or11, ptr %a, align 1
+  %cmp = icmp slt i8 %a1, 0
+  call void @llvm.assume(i1 %cmp)
+  ret i32 %conv3
+}
+
+declare void @llvm.assume(i1)


        


More information about the llvm-commits mailing list