[llvm] [InstCombine] Resolve TODO: Remove one-time check if other logic operand (Y) is constant (PR #77973)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jan 18 17:31:14 PST 2024


https://github.com/AtariDreams updated https://github.com/llvm/llvm-project/pull/77973

>From 8ce1f22830f5ebd4dd42c217f944909c0b2b54c8 Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Sun, 14 Jan 2024 12:53:37 -0500
Subject: [PATCH 1/2] [InstCombine] Add pre-commit tests [NFC]

---
 .../Transforms/InstCombine/shift-logic.ll     | 127 +++++++++++++++++-
 1 file changed, 125 insertions(+), 2 deletions(-)

diff --git a/llvm/test/Transforms/InstCombine/shift-logic.ll b/llvm/test/Transforms/InstCombine/shift-logic.ll
index 544694d398431e..d4d6273331ca59 100644
--- a/llvm/test/Transforms/InstCombine/shift-logic.ll
+++ b/llvm/test/Transforms/InstCombine/shift-logic.ll
@@ -1,8 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
-declare void @use(i64)
-
 define i8 @shl_and(i8 %x, i8 %y) {
 ; CHECK-LABEL: @shl_and(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i8 [[X:%.*]], 5
@@ -16,6 +14,8 @@ define i8 @shl_and(i8 %x, i8 %y) {
   ret i8 %sh1
 }
 
+declare void @use(i8)
+
 define <2 x i8> @shl_and_nonuniform(<2 x i8> %x, <2 x i8> %y) {
 ; CHECK-LABEL: @shl_and_nonuniform(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i8> [[X:%.*]], <i8 5, i8 4>
@@ -29,6 +29,23 @@ define <2 x i8> @shl_and_nonuniform(<2 x i8> %x, <2 x i8> %y) {
   ret <2 x i8> %sh1
 }
 
+define <2 x i8> @shl_and_nonuniform_multiuse(<2 x i8> %x) {
+; CHECK-LABEL: @shl_and_nonuniform_multiuse(
+; CHECK-NEXT:    [[SH0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 3, i8 4>
+; CHECK-NEXT:    call void @use1(<2 x i8> [[SH0]])
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i8> [[X]], <i8 5, i8 4>
+; CHECK-NEXT:    [[SH1:%.*]] = and <2 x i8> [[TMP1]], <i8 -88, i8 42>
+; CHECK-NEXT:    ret <2 x i8> [[SH1]]
+;
+  %sh0 = shl <2 x i8> %x, <i8 3, i8 4>
+  %r = and <2 x i8> %sh0, <i8 42, i8 42> ; constant operand on the 'and'
+  call void @use1(<2 x i8> %sh0)
+  %sh1 = shl <2 x i8> %r, <i8 2, i8 0>
+  ret <2 x i8> %sh1
+}
+
+declare void @use1(<2 x i8>)
+
 define i16 @shl_or(i16 %x, i16 %py) {
 ; CHECK-LABEL: @shl_or(
 ; CHECK-NEXT:    [[Y:%.*]] = srem i16 [[PY:%.*]], 42
@@ -59,6 +76,23 @@ define <2 x i16> @shl_or_undef(<2 x i16> %x, <2 x i16> %py) {
   ret <2 x i16> %sh1
 }
 
+define <2 x i16> @shl_or_undef_multiuse(<2 x i16> %x) {
+; CHECK-LABEL: @shl_or_undef_multiuse(
+; CHECK-NEXT:    [[SH0:%.*]] = shl <2 x i16> [[X:%.*]], <i16 5, i16 undef>
+; CHECK-NEXT:    call void @use2(<2 x i16> [[SH0]])
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i16> [[X]], <i16 12, i16 undef>
+; CHECK-NEXT:    [[SH1:%.*]] = or <2 x i16> [[TMP1]], <i16 5376, i16 poison>
+; CHECK-NEXT:    ret <2 x i16> [[SH1]]
+;
+  %sh0 = shl <2 x i16> %x, <i16 5, i16 undef>
+  %r = or <2 x i16> <i16 42, i16 42>, %sh0 ; constant operand on the 'or'
+  call void @use2(<2 x i16> %sh0)
+  %sh1 = shl <2 x i16> %r, <i16 7, i16 undef>
+  ret <2 x i16> %sh1
+}
+
+declare void @use2(<2 x i16>)
+
 define i32 @shl_xor(i32 %x, i32 %y) {
 ; CHECK-LABEL: @shl_xor(
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[X:%.*]], 12
@@ -85,6 +119,23 @@ define <2 x i32> @shl_xor_nonuniform(<2 x i32> %x, <2 x i32> %y) {
   ret <2 x i32> %sh1
 }
 
+declare void @use3(<2 x i32>)
+
+define <2 x i32> @shl_xor_nonuniform_multiuse(<2 x i32> %x) {
+; CHECK-LABEL: @shl_xor_nonuniform_multiuse(
+; CHECK-NEXT:    [[SH0:%.*]] = shl <2 x i32> [[X:%.*]], <i32 5, i32 6>
+; CHECK-NEXT:    call void @use3(<2 x i32> [[SH0]])
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i32> [[X]], <i32 12, i32 14>
+; CHECK-NEXT:    [[SH1:%.*]] = xor <2 x i32> [[TMP1]], <i32 5376, i32 10752>
+; CHECK-NEXT:    ret <2 x i32> [[SH1]]
+;
+  %sh0 = shl <2 x i32> %x, <i32 5, i32 6>
+  %r = xor <2 x i32> <i32 42, i32 42>, %sh0 ; constant operand on the 'xor'
+  call void @use3(<2 x i32> %sh0)
+  %sh1 = shl <2 x i32> %r, <i32 7, i32 8>
+  ret <2 x i32> %sh1
+}
+
 define i64 @lshr_and(i64 %x, i64 %py) {
 ; CHECK-LABEL: @lshr_and(
 ; CHECK-NEXT:    [[Y:%.*]] = srem i64 [[PY:%.*]], 42
@@ -115,6 +166,21 @@ define <2 x i64> @lshr_and_undef(<2 x i64> %x, <2 x i64> %py) {
   ret <2 x i64> %sh1
 }
 
+define <2 x i64> @lshr_and_undef_multiuse(<2 x i64> %x) {
+; CHECK-LABEL: @lshr_and_undef_multiuse(
+; CHECK-NEXT:    [[SH0:%.*]] = lshr <2 x i64> [[X:%.*]], <i64 5, i64 undef>
+; CHECK-NEXT:    call void @use4(<2 x i64> [[SH0]])
+; CHECK-NEXT:    ret <2 x i64> zeroinitializer
+;
+  %sh0 = lshr <2 x i64> %x, <i64 5, i64 undef>
+  %r = and <2 x i64> <i64 42, i64 42>, %sh0 ; constant operand on the 'and'
+  call void @use4(<2 x i64> %sh0)
+  %sh1 = lshr <2 x i64> %r, <i64 7, i64 undef>
+  ret <2 x i64> %sh1
+}
+
+declare void @use4(<2 x i64>)
+
 define <4 x i32> @lshr_or(<4 x i32> %x, <4 x i32> %y) {
 ; CHECK-LABEL: @lshr_or(
 ; CHECK-NEXT:    [[TMP1:%.*]] = lshr <4 x i32> [[X:%.*]], <i32 12, i32 12, i32 12, i32 12>
@@ -359,6 +425,21 @@ define <2 x i8> @shl_add_nonuniform(<2 x i8> %x, <2 x i8> %y) {
   ret <2 x i8> %sh1
 }
 
+define <2 x i8> @shl_add_nonuniform_multiuse(<2 x i8> %x) {
+; CHECK-LABEL: @shl_add_nonuniform_multiuse(
+; CHECK-NEXT:    [[SH0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 3, i8 4>
+; CHECK-NEXT:    call void @use1(<2 x i8> [[SH0]])
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i8> [[X]], <i8 5, i8 4>
+; CHECK-NEXT:    [[SH1:%.*]] = add <2 x i8> [[TMP1]], <i8 -88, i8 42>
+; CHECK-NEXT:    ret <2 x i8> [[SH1]]
+;
+  %sh0 = shl <2 x i8> %x, <i8 3, i8 4>
+  %r = add <2 x i8> %sh0, <i8 42, i8 42> ; constant operand on the 'add'
+  call void @use1(<2 x i8> %sh0)
+  %sh1 = shl <2 x i8> %r, <i8 2, i8 0>
+  ret <2 x i8> %sh1
+}
+
 
 define <2 x i64> @shl_add_undef(<2 x i64> %x, <2 x i64> %py) {
 ; CHECK-LABEL: @shl_add_undef(
@@ -375,6 +456,20 @@ define <2 x i64> @shl_add_undef(<2 x i64> %x, <2 x i64> %py) {
   ret <2 x i64> %sh1
 }
 
+define <2 x i64> @shl_add_undef_multiuse(<2 x i64> %x) {
+; CHECK-LABEL: @shl_add_undef_multiuse(
+; CHECK-NEXT:    [[SH0:%.*]] = shl <2 x i64> [[X:%.*]], <i64 5, i64 undef>
+; CHECK-NEXT:    call void @use4(<2 x i64> [[SH0]])
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i64> [[X]], <i64 12, i64 undef>
+; CHECK-NEXT:    [[SH1:%.*]] = add <2 x i64> [[TMP1]], <i64 5376, i64 poison>
+; CHECK-NEXT:    ret <2 x i64> [[SH1]]
+;
+  %sh0 = shl <2 x i64> %x, <i64 5, i64 undef>
+  %r = add <2 x i64> <i64 42, i64 42>, %sh0 ; constant operand on the 'add'
+  call void @use4(<2 x i64> %sh0)
+  %sh1 = shl <2 x i64> %r, <i64 7, i64 undef>
+  ret <2 x i64> %sh1
+}
 
 define i8 @lshr_add(i8 %x, i8 %y) {
 ; CHECK-LABEL: @lshr_add(
@@ -457,6 +552,20 @@ define <2 x i8> @shl_sub_nonuniform(<2 x i8> %x, <2 x i8> %y) {
   ret <2 x i8> %sh1
 }
 
+define <2 x i8> @shl_sub_nonuniform_multiuse(<2 x i8> %x) {
+; CHECK-LABEL: @shl_sub_nonuniform_multiuse(
+; CHECK-NEXT:    [[SH0:%.*]] = shl <2 x i8> [[X:%.*]], <i8 3, i8 4>
+; CHECK-NEXT:    call void @use1(<2 x i8> [[SH0]])
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i8> [[X]], <i8 5, i8 4>
+; CHECK-NEXT:    [[SH1:%.*]] = add <2 x i8> [[TMP1]], <i8 88, i8 -42>
+; CHECK-NEXT:    ret <2 x i8> [[SH1]]
+;
+  %sh0 = shl <2 x i8> %x, <i8 3, i8 4>
+  %r = sub <2 x i8> %sh0, <i8 42, i8 42>
+  call void @use1(<2 x i8> %sh0)
+  %sh1 = shl <2 x i8> %r, <i8 2, i8 0>
+  ret <2 x i8> %sh1
+}
 
 define <2 x i64> @shl_sub_undef(<2 x i64> %x, <2 x i64> %py) {
 ; CHECK-LABEL: @shl_sub_undef(
@@ -473,6 +582,20 @@ define <2 x i64> @shl_sub_undef(<2 x i64> %x, <2 x i64> %py) {
   ret <2 x i64> %sh1
 }
 
+define <2 x i64> @shl_sub_undef_multiuse(<2 x i64> %x) {
+; CHECK-LABEL: @shl_sub_undef_multiuse(
+; CHECK-NEXT:    [[SH0:%.*]] = shl <2 x i64> [[X:%.*]], <i64 5, i64 undef>
+; CHECK-NEXT:    call void @use4(<2 x i64> [[SH0]])
+; CHECK-NEXT:    [[TMP1:%.*]] = shl <2 x i64> [[X]], <i64 12, i64 undef>
+; CHECK-NEXT:    [[SH1:%.*]] = sub <2 x i64> <i64 5376, i64 poison>, [[TMP1]]
+; CHECK-NEXT:    ret <2 x i64> [[SH1]]
+;
+  %sh0 = shl <2 x i64> %x, <i64 5, i64 undef>
+  %r = sub <2 x i64> <i64 42, i64 42>, %sh0 ; constant operand on the 'sub'
+  call void @use4(<2 x i64> %sh0)
+  %sh1 = shl <2 x i64> %r, <i64 7, i64 undef>
+  ret <2 x i64> %sh1
+}
 
 define i8 @lshr_sub(i8 %x, i8 %y) {
 ; CHECK-LABEL: @lshr_sub(

>From 3e0106dc16fdcd258bf0b1063d3c2b17d9a479c6 Mon Sep 17 00:00:00 2001
From: Rose <83477269+AtariDreams at users.noreply.github.com>
Date: Fri, 12 Jan 2024 15:15:27 -0500
Subject: [PATCH 2/2] [Transforms] Remove one-time check if other logic operand
 (Y) is constant

By using match(W, m_ImmConstant()), we no longer need the one-use restriction on the first shift when the other logic operand is an immediate constant.
---
 .../InstCombine/InstCombineShifts.cpp         | 27 ++++++++++---------
 1 file changed, 15 insertions(+), 12 deletions(-)

diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index b7958978c450c9..01eb44154d9c56 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -368,12 +368,11 @@ static Instruction *foldShiftOfShiftedBinOp(BinaryOperator &I,
 
   // Find a matching one-use shift by constant. The fold is not valid if the sum
   // of the shift values equals or exceeds bitwidth.
-  // TODO: Remove the one-use check if the other logic operand (Y) is constant.
   Value *X, *Y;
-  auto matchFirstShift = [&](Value *V) {
+  auto matchFirstShift = [&](Value *V, Value *W) {
     APInt Threshold(Ty->getScalarSizeInBits(), Ty->getScalarSizeInBits());
-    return match(V,
-                 m_OneUse(m_BinOp(ShiftOpcode, m_Value(X), m_Constant(C0)))) &&
+    return match(V, m_BinOp(ShiftOpcode, m_Value(X), m_Constant(C0))) &&
+           (V->hasOneUse() || match(W, m_ImmConstant())) &&
            match(ConstantExpr::getAdd(C0, C1),
                  m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold));
   };
@@ -382,9 +381,9 @@ static Instruction *foldShiftOfShiftedBinOp(BinaryOperator &I,
   // is not so we cannot reoder if we match operand(1) and need to keep the
   // operands in their original positions.
   bool FirstShiftIsOp1 = false;
-  if (matchFirstShift(BinInst->getOperand(0)))
+  if (matchFirstShift(BinInst->getOperand(0), BinInst->getOperand(1)))
     Y = BinInst->getOperand(1);
-  else if (matchFirstShift(BinInst->getOperand(1))) {
+  else if (matchFirstShift(BinInst->getOperand(1), BinInst->getOperand(0))) {
     Y = BinInst->getOperand(0);
     FirstShiftIsOp1 = BinInst->getOpcode() == Instruction::Sub;
   } else
@@ -565,14 +564,17 @@ static bool canEvaluateShifted(Value *V, unsigned NumBits, bool IsLeftShift,
     return true;
 
   Instruction *I = dyn_cast<Instruction>(V);
-  if (!I) return false;
+  if (!I)
+    return false;
 
   // We can't mutate something that has multiple uses: doing so would
   // require duplicating the instruction in general, which isn't profitable.
-  if (!I->hasOneUse()) return false;
+  if (!I->hasOneUse())
+    return false;
 
   switch (I->getOpcode()) {
-  default: return false;
+  default:
+    return false;
   case Instruction::And:
   case Instruction::Or:
   case Instruction::Xor:
@@ -689,7 +691,8 @@ static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
   IC.addToWorklist(I);
 
   switch (I->getOpcode()) {
-  default: llvm_unreachable("Inconsistency with CanEvaluateShifted");
+  default:
+    llvm_unreachable("Inconsistency with CanEvaluateShifted");
   case Instruction::And:
   case Instruction::Or:
   case Instruction::Xor:
@@ -727,8 +730,8 @@ static Value *getShiftedValue(Value *V, unsigned NumBits, bool isLeftShift,
     IC.InsertNewInstWith(Neg, I->getIterator());
     unsigned TypeWidth = I->getType()->getScalarSizeInBits();
     APInt Mask = APInt::getLowBitsSet(TypeWidth, TypeWidth - NumBits);
-    auto *And = BinaryOperator::CreateAnd(Neg,
-                                          ConstantInt::get(I->getType(), Mask));
+    auto *And =
+        BinaryOperator::CreateAnd(Neg, ConstantInt::get(I->getType(), Mask));
     And->takeName(I);
     return IC.InsertNewInstWith(And, I->getIterator());
   }



More information about the llvm-commits mailing list