[llvm] [InstCombine] Fold `ucmp/scmp(x, y) >> N` to `zext/sext(x < y)` when N is one less than the width of the result of `ucmp/scmp` (PR #104009)

via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 14 07:30:55 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-transforms

Author: Volodymyr Vasylkun (Poseydon42)

<details>
<summary>Changes</summary>

Proof: https://alive2.llvm.org/ce/z/4diUqN

This would help with the regression that's currently blocking #103833 

---
Full diff: https://github.com/llvm/llvm-project/pull/104009.diff


2 Files Affected:

- (modified) llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp (+18) 
- (added) llvm/test/Transforms/InstCombine/lshr-ashr-of-uscmp.ll (+95) 


``````````diff
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 38f8a41214b682..74d67e5c5a9a16 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -511,6 +511,24 @@ Instruction *InstCombinerImpl::commonShiftTransforms(BinaryOperator &I) {
   if (match(Op1, m_Or(m_Value(), m_SpecificInt(BitWidth - 1))))
     return replaceOperand(I, 1, ConstantInt::get(Ty, BitWidth - 1));
 
+  Instruction *CmpIntr;
+  const APInt *ShiftAmount;
+  if ((I.getOpcode() == Instruction::LShr ||
+       I.getOpcode() == Instruction::AShr) &&
+      match(Op0, m_Instruction(CmpIntr)) && CmpIntr->hasOneUse() &&
+      isa<CmpIntrinsic>(CmpIntr) && match(Op1, m_APInt(ShiftAmount)) &&
+      *ShiftAmount + 1 == Ty->getIntegerBitWidth()) {
+    Value *Cmp = Builder.CreateICmp(
+        cast<CmpIntrinsic>(CmpIntr)->isSigned() ? ICmpInst::ICMP_SLT
+                                                : ICmpInst::ICMP_ULT,
+        CmpIntr->getOperand(0), CmpIntr->getOperand(1));
+    Instruction *CmpExt =
+        CastInst::Create(I.getOpcode() == Instruction::LShr ? Instruction::ZExt
+                                                            : Instruction::SExt,
+                         Cmp, Ty);
+    return CmpExt;
+  }
+
   return nullptr;
 }
 
diff --git a/llvm/test/Transforms/InstCombine/lshr-ashr-of-uscmp.ll b/llvm/test/Transforms/InstCombine/lshr-ashr-of-uscmp.ll
new file mode 100644
index 00000000000000..62043d1af0c54c
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/lshr-ashr-of-uscmp.ll
@@ -0,0 +1,95 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare void @use(i8 %val)
+
+; ucmp/scmp(x, y) >> N folds to either zext(x < y) or sext(x < y)
+; if N is one less than the width of the result of ucmp/scmp
+define i8 @ucmp_to_zext(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @ucmp_to_zext(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[X]], [[Y]]
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i8
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  %2 = lshr i8 %1, 7
+  ret i8 %2
+}
+
+define i8 @ucmp_to_sext(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @ucmp_to_sext(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult i32 [[X]], [[Y]]
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i1 [[TMP1]] to i8
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  %2 = ashr i8 %1, 7
+  ret i8 %2
+}
+
+define i8 @scmp_to_zext(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_to_zext(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[X]], [[Y]]
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i8
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  %2 = lshr i8 %1, 7
+  ret i8 %2
+}
+
+define i8 @scmp_to_sext(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @scmp_to_sext(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt i32 [[X]], [[Y]]
+; CHECK-NEXT:    [[TMP2:%.*]] = sext i1 [[TMP1]] to i8
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.scmp(i32 %x, i32 %y)
+  %2 = ashr i8 %1, 7
+  ret i8 %2
+}
+
+; Negative test: incorrect shift amount
+define i8 @ucmp_to_zext_neg1(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @ucmp_to_zext_neg1(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i8 [[TMP1]], 5
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  %2 = lshr i8 %1, 5
+  ret i8 %2
+}
+
+; Negative test: shift amount is not a constant
+define i8 @ucmp_to_zext_neg2(i32 %x, i32 %y, i8 %s) {
+; CHECK-LABEL: define i8 @ucmp_to_zext_neg2(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]], i8 [[S:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i8 [[TMP1]], [[S]]
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  %2 = lshr i8 %1, %s
+  ret i8 %2
+}
+
+; Negative test: the result of ucmp/scmp is used more than once
+define i8 @ucmp_to_zext_neg3(i32 %x, i32 %y) {
+; CHECK-LABEL: define i8 @ucmp_to_zext_neg3(
+; CHECK-SAME: i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = call i8 @llvm.ucmp.i8.i32(i32 [[X]], i32 [[Y]])
+; CHECK-NEXT:    call void @use(i8 [[TMP1]])
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i8 [[TMP1]], 7
+; CHECK-NEXT:    ret i8 [[TMP2]]
+;
+  %1 = call i8 @llvm.ucmp(i32 %x, i32 %y)
+  call void @use(i8 %1)
+  %2 = lshr i8 %1, 7
+  ret i8 %2
+}

``````````

</details>


https://github.com/llvm/llvm-project/pull/104009


More information about the llvm-commits mailing list