[llvm] 1e202e8 - [InstCombine] fold shift-of-srem-by-2 to mask+shift
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 20 14:10:26 PDT 2021
Author: Sanjay Patel
Date: 2021-04-20T17:10:16-04:00
New Revision: 1e202e8f393a8bf8a9066482e66ffe2135dde9bf
URL: https://github.com/llvm/llvm-project/commit/1e202e8f393a8bf8a9066482e66ffe2135dde9bf
DIFF: https://github.com/llvm/llvm-project/commit/1e202e8f393a8bf8a9066482e66ffe2135dde9bf.diff
LOG: [InstCombine] fold shift-of-srem-by-2 to mask+shift
There are several potential srem-by-2 folds
because the result is known to be in {-1,0,1}.
https://alive2.llvm.org/ce/z/LuVyeK
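For reference, a minimal before/after sketch of the scalar fold (mirroring the
negative_and_odd test below; value names are illustrative, and the Alive2 link
above proves the general equivalence):

; before: negative-and-odd check via srem
define i32 @negative_and_odd(i32 %x) {
  %s = srem i32 %x, 2
  %r = lshr i32 %s, 31
  ret i32 %r
}

; after: extract the sign bit of x and mask with x
define i32 @negative_and_odd(i32 %x) {
  %sign = lshr i32 %x, 31
  %r = and i32 %sign, %x
  ret i32 %r
}

Since srem X, 2 only produces -1, 0, or 1, its sign bit is set exactly when X
is negative and odd, which is what masking X's shifted-down sign bit with X
computes directly.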
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
llvm/test/Transforms/InstCombine/lshr.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 522374017181..402e21eb6561 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1137,11 +1137,19 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
}
}
- // lshr i32 (X -nsw Y), 31 --> zext (X < Y)
Value *Y;
- if (ShAmt == BitWidth - 1 &&
- match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
- return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);
+ if (ShAmt == BitWidth - 1) {
+ // lshr i32 (X -nsw Y), 31 --> zext (X < Y)
+ if (match(Op0, m_OneUse(m_NSWSub(m_Value(X), m_Value(Y)))))
+ return new ZExtInst(Builder.CreateICmpSLT(X, Y), Ty);
+
+ // Check if a number is negative and odd:
+ // lshr i32 (srem X, 2), 31 --> and (X >> 31), X
+ if (match(Op0, m_OneUse(m_SRem(m_Value(X), m_SpecificInt(2))))) {
+ Value *Signbit = Builder.CreateLShr(X, ShAmt);
+ return BinaryOperator::CreateAnd(Signbit, X);
+ }
+ }
if (match(Op0, m_LShr(m_Value(X), m_APInt(ShOp1)))) {
unsigned AmtSum = ShAmt + ShOp1->getZExtValue();
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index 9311f68e808d..b4ac2715101c 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -328,8 +328,8 @@ define i32 @mul_splat_fold_no_nuw(i32 %x) {
define i32 @negative_and_odd(i32 %x) {
; CHECK-LABEL: @negative_and_odd(
-; CHECK-NEXT: [[S:%.*]] = srem i32 [[X:%.*]], 2
-; CHECK-NEXT: [[R:%.*]] = lshr i32 [[S]], 31
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X:%.*]], 31
+; CHECK-NEXT: [[R:%.*]] = and i32 [[TMP1]], [[X]]
; CHECK-NEXT: ret i32 [[R]]
;
%s = srem i32 %x, 2
@@ -339,8 +339,8 @@ define i32 @negative_and_odd(i32 %x) {
define <2 x i7> @negative_and_odd_vec(<2 x i7> %x) {
; CHECK-LABEL: @negative_and_odd_vec(
-; CHECK-NEXT: [[S:%.*]] = srem <2 x i7> [[X:%.*]], <i7 2, i7 2>
-; CHECK-NEXT: [[R:%.*]] = lshr <2 x i7> [[S]], <i7 6, i7 6>
+; CHECK-NEXT: [[TMP1:%.*]] = lshr <2 x i7> [[X:%.*]], <i7 6, i7 6>
+; CHECK-NEXT: [[R:%.*]] = and <2 x i7> [[TMP1]], [[X]]
; CHECK-NEXT: ret <2 x i7> [[R]]
;
%s = srem <2 x i7> %x, <i7 2, i7 2>
@@ -348,6 +348,8 @@ define <2 x i7> @negative_and_odd_vec(<2 x i7> %x) {
ret <2 x i7> %r
}
+; Negative test - extra use, but this may still be worth doing to avoid the srem?
+
define i32 @negative_and_odd_uses(i32 %x, i32* %p) {
; CHECK-LABEL: @negative_and_odd_uses(
; CHECK-NEXT: [[S:%.*]] = srem i32 [[X:%.*]], 2
@@ -361,6 +363,8 @@ define i32 @negative_and_odd_uses(i32 %x, i32* %p) {
ret i32 %r
}
+; Negative test - wrong divisor
+
define i32 @srem3(i32 %x) {
; CHECK-LABEL: @srem3(
; CHECK-NEXT: [[S:%.*]] = srem i32 [[X:%.*]], 3
@@ -372,6 +376,8 @@ define i32 @srem3(i32 %x) {
ret i32 %r
}
+; Negative test - wrong shift amount
+
define i32 @srem2_lshr30(i32 %x) {
; CHECK-LABEL: @srem2_lshr30(
; CHECK-NEXT: [[S:%.*]] = srem i32 [[X:%.*]], 2