[llvm] 43a59be - [CVP] Expand bound `udiv`'s, symmetrically with `urem`'s
Roman Lebedev via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 20 10:53:41 PST 2023
Author: Roman Lebedev
Date: 2023-01-20T21:52:16+03:00
New Revision: 43a59be45d24497dce4795742aacf19987ce3a5c
URL: https://github.com/llvm/llvm-project/commit/43a59be45d24497dce4795742aacf19987ce3a5c
DIFF: https://github.com/llvm/llvm-project/commit/43a59be45d24497dce4795742aacf19987ce3a5c.diff
LOG: [CVP] Expand bound `udiv`'s, symmetrically with `urem`'s
This is symmetrical with the `urem` case, which was added in 66efb986322b206834e7c9e1eb777fa053912c39.
Alive2 proof for the simple case: https://alive2.llvm.org/ce/z/gRumLd / https://alive2.llvm.org/ce/z/rxEeC5
Alive2 proof for the second variant of the precondition: https://alive2.llvm.org/ce/z/cAm9TD
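
To illustrate the transform the diff below implements, here is a minimal hand-written IR sketch (the function names here are made up for this illustration; the behavior mirrors the updated tests): when the operand ranges prove X u< Y the udiv folds to 0, and when they only prove X u< 2*Y the quotient is at most 1, so the udiv is expanded into a zext of an icmp uge.

define i8 @udiv_folds_to_zero(i8 %x) {
  ; CVP knows %x u< 3, so %x u/ 3 is always 0 and the udiv is
  ; replaced with the constant 0.
  %cmp.x.upper = icmp ult i8 %x, 3
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

define i8 @udiv_expands_to_zext(i8 %x) {
  ; CVP only knows %x u< 6 (i.e. %x u< 2 * 3), so the quotient is 0 or 1
  ; and the udiv is expanded into:
  ;   %div.cmp = icmp uge i8 %x, 3
  ;   %div = zext i1 %div.cmp to i8
  %cmp.x.upper = icmp ult i8 %x, 6
  call void @llvm.assume(i1 %cmp.x.upper)
  %div = udiv i8 %x, 3
  ret i8 %div
}

declare void @llvm.assume(i1)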
Added:
Modified:
llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
llvm/test/Transforms/CorrelatedValuePropagation/udiv-expansion.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
index 52f1e3556fc70..90b4b521e7de2 100644
--- a/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
+++ b/llvm/lib/Transforms/Scalar/CorrelatedValuePropagation.cpp
@@ -94,7 +94,8 @@ STATISTIC(NumSaturating,
"Number of saturating arithmetics converted to normal arithmetics");
STATISTIC(NumNonNull, "Number of function pointer arguments marked non-null");
STATISTIC(NumMinMax, "Number of llvm.[us]{min,max} intrinsics removed");
-STATISTIC(NumURemExpanded, "Number of bound urem's expanded");
+STATISTIC(NumUDivURemsNarrowedExpanded,
+ "Number of bound udiv's/urem's expanded");
namespace {
@@ -752,19 +753,23 @@ static bool narrowSDivOrSRem(BinaryOperator *Instr, const ConstantRange &LCR,
return true;
}
-static bool processURem(BinaryOperator *Instr, const ConstantRange &XCR,
- const ConstantRange &YCR) {
- assert(Instr->getOpcode() == Instruction::URem);
- assert(!Instr->getType()->isVectorTy());
+static bool expandUDivOrURem(BinaryOperator *Instr, const ConstantRange &XCR,
+ const ConstantRange &YCR) {
+ Type *Ty = Instr->getType();
+ assert(Instr->getOpcode() == Instruction::UDiv ||
+ Instr->getOpcode() == Instruction::URem);
+ assert(!Ty->isVectorTy());
+ bool IsRem = Instr->getOpcode() == Instruction::URem;
Value *X = Instr->getOperand(0);
Value *Y = Instr->getOperand(1);
+ // X u/ Y -> 0 iff X u< Y
// X u% Y -> X iff X u< Y
if (XCR.icmp(ICmpInst::ICMP_ULT, YCR)) {
- Instr->replaceAllUsesWith(X);
+ Instr->replaceAllUsesWith(IsRem ? X : Constant::getNullValue(Ty));
Instr->eraseFromParent();
- ++NumURemExpanded;
+ ++NumUDivURemsNarrowedExpanded;
return true;
}
@@ -798,17 +803,24 @@ static bool processURem(BinaryOperator *Instr, const ConstantRange &XCR,
return false;
IRBuilder<> B(Instr);
- // NOTE: this transformation introduces two uses of X,
- // but it may be undef so we must freeze it first.
- Value *FrozenX = B.CreateFreeze(X, X->getName() + ".frozen");
- auto *AdjX = B.CreateNUWSub(FrozenX, Y, Instr->getName() + ".urem");
- auto *Cmp =
- B.CreateICmp(ICmpInst::ICMP_ULT, FrozenX, Y, Instr->getName() + ".cmp");
- auto *ExpandedURem = B.CreateSelect(Cmp, FrozenX, AdjX);
- ExpandedURem->takeName(Instr);
- Instr->replaceAllUsesWith(ExpandedURem);
+ Value *ExpandedOp;
+ if (IsRem) {
+ // NOTE: this transformation introduces two uses of X,
+ // but it may be undef so we must freeze it first.
+ Value *FrozenX = B.CreateFreeze(X, X->getName() + ".frozen");
+ auto *AdjX = B.CreateNUWSub(FrozenX, Y, Instr->getName() + ".urem");
+ auto *Cmp =
+ B.CreateICmp(ICmpInst::ICMP_ULT, FrozenX, Y, Instr->getName() + ".cmp");
+ ExpandedOp = B.CreateSelect(Cmp, FrozenX, AdjX);
+ } else {
+ auto *Cmp =
+ B.CreateICmp(ICmpInst::ICMP_UGE, X, Y, Instr->getName() + ".cmp");
+ ExpandedOp = B.CreateZExt(Cmp, Ty, Instr->getName() + ".udiv");
+ }
+ ExpandedOp->takeName(Instr);
+ Instr->replaceAllUsesWith(ExpandedOp);
Instr->eraseFromParent();
- ++NumURemExpanded;
+ ++NumUDivURemsNarrowedExpanded;
return true;
}
@@ -860,7 +872,7 @@ static bool processUDivOrURem(BinaryOperator *Instr, LazyValueInfo *LVI) {
ConstantRange XCR = LVI->getConstantRangeAtUse(Instr->getOperandUse(0));
ConstantRange YCR = LVI->getConstantRangeAtUse(Instr->getOperandUse(1));
- if (Instr->getOpcode() == Instruction::URem && processURem(Instr, XCR, YCR))
+ if (expandUDivOrURem(Instr, XCR, YCR))
return true;
return narrowUDivOrURem(Instr, XCR, YCR);
diff --git a/llvm/test/Transforms/CorrelatedValuePropagation/udiv-expansion.ll b/llvm/test/Transforms/CorrelatedValuePropagation/udiv-expansion.ll
index 90a19e36e8ced..7284f280fa764 100644
--- a/llvm/test/Transforms/CorrelatedValuePropagation/udiv-expansion.ll
+++ b/llvm/test/Transforms/CorrelatedValuePropagation/udiv-expansion.ll
@@ -9,7 +9,6 @@ define i8 @constant.divisor.v3(i8 %x) {
; CHECK-LABEL: @constant.divisor.v3(
; CHECK-NEXT: [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 3
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], 3
; CHECK-NEXT: ret i8 0
;
%cmp.x.upper = icmp ult i8 %x, 3
@@ -21,7 +20,8 @@ define i8 @constant.divisor.v4(i8 %x) {
; CHECK-LABEL: @constant.divisor.v4(
; CHECK-NEXT: [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 4
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], 3
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%cmp.x.upper = icmp ult i8 %x, 4
@@ -32,7 +32,8 @@ define i8 @constant.divisor.v4(i8 %x) {
define i8 @constant.divisor.x.range.v4(ptr %x.ptr) {
; CHECK-LABEL: @constant.divisor.x.range.v4(
; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG0:![0-9]+]]
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], 3
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%x = load i8, ptr %x.ptr, !range !{ i8 0, i8 4 }
@@ -42,7 +43,8 @@ define i8 @constant.divisor.x.range.v4(ptr %x.ptr) {
define i8 @constant.divisor.x.mask.v4(i8 %x) {
; CHECK-LABEL: @constant.divisor.x.mask.v4(
; CHECK-NEXT: [[X_MASKED:%.*]] = and i8 [[X:%.*]], 3
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X_MASKED]], 3
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X_MASKED]], 3
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%x.masked = and i8 %x, 3
@@ -53,7 +55,8 @@ define i8 @constant.divisor.v5(i8 %x) {
; CHECK-LABEL: @constant.divisor.v5(
; CHECK-NEXT: [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 5
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], 3
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%cmp.x.upper = icmp ult i8 %x, 5
@@ -65,7 +68,8 @@ define i8 @constant.divisor.v6(i8 %x) {
; CHECK-LABEL: @constant.divisor.v6(
; CHECK-NEXT: [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 6
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], 3
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 3
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%cmp.x.upper = icmp ult i8 %x, 6
@@ -96,7 +100,6 @@ define i8 @variable.v3(i8 %x, i8 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT: [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_Y_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], [[Y]]
; CHECK-NEXT: ret i8 0
;
%cmp.x = icmp ult i8 %x, 3
@@ -116,7 +119,8 @@ define i8 @variable.v4(i8 %x, i8 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT: [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_Y_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], [[Y]]
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%cmp.x = icmp ult i8 %x, 4
@@ -132,7 +136,8 @@ define i8 @variable.v4.range(ptr %x.ptr, ptr %y.ptr) {
; CHECK-LABEL: @variable.v4.range(
; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG0]]
; CHECK-NEXT: [[Y:%.*]] = load i8, ptr [[Y_PTR:%.*]], align 1, !range [[RNG1:![0-9]+]]
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], [[Y]]
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%x = load i8, ptr %x.ptr, !range !{ i8 0, i8 4 }
@@ -148,7 +153,8 @@ define i8 @variable.v5(i8 %x, i8 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT: [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_Y_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], [[Y]]
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%cmp.x = icmp ult i8 %x, 5
@@ -168,7 +174,8 @@ define i8 @variable.v6(i8 %x, i8 %y) {
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_Y_LOWER]])
; CHECK-NEXT: [[CMP_Y_UPPER:%.*]] = icmp ule i8 [[Y]], 4
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_Y_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], [[Y]]
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], [[Y]]
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%cmp.x = icmp ult i8 %x, 6
@@ -207,7 +214,6 @@ define i8 @large.divisor.v0(i8 %x) {
; CHECK-LABEL: @large.divisor.v0(
; CHECK-NEXT: [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], 127
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], 127
; CHECK-NEXT: ret i8 0
;
%cmp.x.upper = icmp ult i8 %x, 127
@@ -219,7 +225,8 @@ define i8 @large.divisor.v1(i8 %x) {
; CHECK-LABEL: @large.divisor.v1(
; CHECK-NEXT: [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], -128
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], 127
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 127
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%cmp.x.upper = icmp ult i8 %x, 128
@@ -230,7 +237,8 @@ define i8 @large.divisor.v1(i8 %x) {
define i8 @large.divisor.v1.range(ptr %x.ptr) {
; CHECK-LABEL: @large.divisor.v1.range(
; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG2:![0-9]+]]
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], 127
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], 127
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%x = load i8, ptr %x.ptr, !range !{ i8 0, i8 128 }
@@ -250,7 +258,6 @@ define i8 @large.divisor.with.overflow.v0(i8 %x) {
; CHECK-LABEL: @large.divisor.with.overflow.v0(
; CHECK-NEXT: [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], -128
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], -128
; CHECK-NEXT: ret i8 0
;
%cmp.x.upper = icmp ult i8 %x, 128
@@ -262,7 +269,8 @@ define i8 @large.divisor.with.overflow.v1(i8 %x) {
; CHECK-LABEL: @large.divisor.with.overflow.v1(
; CHECK-NEXT: [[CMP_X_UPPER:%.*]] = icmp ult i8 [[X:%.*]], -127
; CHECK-NEXT: call void @llvm.assume(i1 [[CMP_X_UPPER]])
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], -128
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], -128
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%cmp.x.upper = icmp ult i8 %x, 129
@@ -273,7 +281,8 @@ define i8 @large.divisor.with.overflow.v1(i8 %x) {
define i8 @large.divisor.with.overflow.v1.range(ptr %x.ptr) {
; CHECK-LABEL: @large.divisor.with.overflow.v1.range(
; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[X_PTR:%.*]], align 1, !range [[RNG3:![0-9]+]]
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X]], -128
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X]], -128
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%x = load i8, ptr %x.ptr, !range !{ i8 0, i8 129 }
@@ -282,7 +291,8 @@ define i8 @large.divisor.with.overflow.v1.range(ptr %x.ptr) {
}
define i8 @large.divisor.with.overflow.v2.unbound.x(i8 %x) {
; CHECK-LABEL: @large.divisor.with.overflow.v2.unbound.x(
-; CHECK-NEXT: [[DIV:%.*]] = udiv i8 [[X:%.*]], -128
+; CHECK-NEXT: [[DIV_CMP:%.*]] = icmp uge i8 [[X:%.*]], -128
+; CHECK-NEXT: [[DIV:%.*]] = zext i1 [[DIV_CMP]] to i8
; CHECK-NEXT: ret i8 [[DIV]]
;
%div = udiv i8 %x, 128