[llvm] 4e0008d - Revert "[InstCombine] try to narrow shifted bswap-of-zext"
Nathan Chancellor via llvm-commits
llvm-commits at lists.llvm.org
Tue Mar 22 17:41:03 PDT 2022
Author: Nathan Chancellor
Date: 2022-03-22T17:32:33-07:00
New Revision: 4e0008dcbe9fce99b9727e8bbeb129efc7bf2d80
URL: https://github.com/llvm/llvm-project/commit/4e0008dcbe9fce99b9727e8bbeb129efc7bf2d80
DIFF: https://github.com/llvm/llvm-project/commit/4e0008dcbe9fce99b9727e8bbeb129efc7bf2d80.diff
LOG: Revert "[InstCombine] try to narrow shifted bswap-of-zext"
This reverts commit 9e9bda2e8f5b88715bad767a4b7740df32b040d2.
This causes a backend error when building the Linux kernel for arm64.
See https://reviews.llvm.org/D122166 for a simplified reproducer.
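For context, the reverted fold rewrote a shifted bswap-of-zext into a narrower bswap. A minimal before/after sketch, reconstructed from the narrow_bswap test in llvm/test/Transforms/InstCombine/lshr.ll below (not the arm64 reproducer itself):

  ; input pattern
  %z = zext i16 %x to i32
  %b = call i32 @llvm.bswap.i32(i32 %z)
  %s = lshr i32 %b, 16

  ; result of the now-reverted fold: (bswap (zext X)) >> C --> zext (bswap X >> C')
  %1 = call i16 @llvm.bswap.i16(i16 %x)
  %s = zext i16 %1 to i32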
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
llvm/test/Transforms/InstCombine/lshr.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 403979b4a6a39..03214118a2cf7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1173,21 +1173,6 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
MulC->logBase2() == ShAmtC)
return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *MulC - 2));
- // Try to narrow a bswap:
- // (bswap (zext X)) >> C --> zext (bswap X >> C')
- // In the case where the shift amount equals the bitwidth difference, the
- // shift is eliminated.
- if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::bswap>(
- m_OneUse(m_ZExt(m_Value(X))))))) {
- // TODO: If the shift amount is less than the zext, we could shift left.
- unsigned WidthDiff = BitWidth - X->getType()->getScalarSizeInBits();
- if (ShAmtC >= WidthDiff) {
- Value *NarrowSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
- Value *NewShift = Builder.CreateLShr(NarrowSwap, ShAmtC - WidthDiff);
- return new ZExtInst(NewShift, Ty);
- }
- }
-
// If the shifted-out value is known-zero, then this is an exact shift.
if (!I.isExact() &&
MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmtC), 0, &I)) {
diff --git a/llvm/test/Transforms/InstCombine/lshr.ll b/llvm/test/Transforms/InstCombine/lshr.ll
index a46f3f460f5e8..175430189ce12 100644
--- a/llvm/test/Transforms/InstCombine/lshr.ll
+++ b/llvm/test/Transforms/InstCombine/lshr.ll
@@ -831,8 +831,9 @@ define i1 @icmp_sge(i32 %x, i32 %y) {
define i32 @narrow_bswap(i16 %x) {
; CHECK-LABEL: @narrow_bswap(
-; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[X:%.*]])
-; CHECK-NEXT: [[S:%.*]] = zext i16 [[TMP1]] to i32
+; CHECK-NEXT: [[Z:%.*]] = zext i16 [[X:%.*]] to i32
+; CHECK-NEXT: [[B:%.*]] = call i32 @llvm.bswap.i32(i32 [[Z]])
+; CHECK-NEXT: [[S:%.*]] = lshr exact i32 [[B]], 16
; CHECK-NEXT: ret i32 [[S]]
;
%z = zext i16 %x to i32
@@ -843,8 +844,9 @@ define i32 @narrow_bswap(i16 %x) {
define i128 @narrow_bswap_extra_wide(i16 %x) {
; CHECK-LABEL: @narrow_bswap_extra_wide(
-; CHECK-NEXT: [[TMP1:%.*]] = call i16 @llvm.bswap.i16(i16 [[X:%.*]])
-; CHECK-NEXT: [[S:%.*]] = zext i16 [[TMP1]] to i128
+; CHECK-NEXT: [[Z:%.*]] = zext i16 [[X:%.*]] to i128
+; CHECK-NEXT: [[B:%.*]] = call i128 @llvm.bswap.i128(i128 [[Z]])
+; CHECK-NEXT: [[S:%.*]] = lshr exact i128 [[B]], 112
; CHECK-NEXT: ret i128 [[S]]
;
%z = zext i16 %x to i128
@@ -853,8 +855,6 @@ define i128 @narrow_bswap_extra_wide(i16 %x) {
ret i128 %s
}
-; TODO: The bswap can be narrowed followed by shl.
-
define i32 @narrow_bswap_undershift(i16 %x) {
; CHECK-LABEL: @narrow_bswap_undershift(
; CHECK-NEXT: [[Z:%.*]] = zext i16 [[X:%.*]] to i32
@@ -870,8 +870,9 @@ define i32 @narrow_bswap_undershift(i16 %x) {
define <2 x i64> @narrow_bswap_splat(<2 x i16> %x) {
; CHECK-LABEL: @narrow_bswap_splat(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i16> @llvm.bswap.v2i16(<2 x i16> [[X:%.*]])
-; CHECK-NEXT: [[S:%.*]] = zext <2 x i16> [[TMP1]] to <2 x i64>
+; CHECK-NEXT: [[Z:%.*]] = zext <2 x i16> [[X:%.*]] to <2 x i64>
+; CHECK-NEXT: [[B:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[Z]])
+; CHECK-NEXT: [[S:%.*]] = lshr exact <2 x i64> [[B]], <i64 48, i64 48>
; CHECK-NEXT: ret <2 x i64> [[S]]
;
%z = zext <2 x i16> %x to <2 x i64>
@@ -880,8 +881,6 @@ define <2 x i64> @narrow_bswap_splat(<2 x i16> %x) {
ret <2 x i64> %s
}
-; TODO: poison/undef in the shift amount is ok to propagate.
-
define <2 x i64> @narrow_bswap_splat_poison_elt(<2 x i16> %x) {
; CHECK-LABEL: @narrow_bswap_splat_poison_elt(
; CHECK-NEXT: [[Z:%.*]] = zext <2 x i16> [[X:%.*]] to <2 x i64>
@@ -897,9 +896,9 @@ define <2 x i64> @narrow_bswap_splat_poison_elt(<2 x i16> %x) {
define <2 x i64> @narrow_bswap_overshift(<2 x i32> %x) {
; CHECK-LABEL: @narrow_bswap_overshift(
-; CHECK-NEXT: [[TMP1:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[X:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = lshr <2 x i32> [[TMP1]], <i32 16, i32 16>
-; CHECK-NEXT: [[S:%.*]] = zext <2 x i32> [[TMP2]] to <2 x i64>
+; CHECK-NEXT: [[Z:%.*]] = zext <2 x i32> [[X:%.*]] to <2 x i64>
+; CHECK-NEXT: [[B:%.*]] = call <2 x i64> @llvm.bswap.v2i64(<2 x i64> [[Z]])
+; CHECK-NEXT: [[S:%.*]] = lshr <2 x i64> [[B]], <i64 48, i64 48>
; CHECK-NEXT: ret <2 x i64> [[S]]
;
%z = zext <2 x i32> %x to <2 x i64>
@@ -910,9 +909,9 @@ define <2 x i64> @narrow_bswap_overshift(<2 x i32> %x) {
define i128 @narrow_bswap_overshift2(i96 %x) {
; CHECK-LABEL: @narrow_bswap_overshift2(
-; CHECK-NEXT: [[TMP1:%.*]] = call i96 @llvm.bswap.i96(i96 [[X:%.*]])
-; CHECK-NEXT: [[TMP2:%.*]] = lshr i96 [[TMP1]], 29
-; CHECK-NEXT: [[S:%.*]] = zext i96 [[TMP2]] to i128
+; CHECK-NEXT: [[Z:%.*]] = zext i96 [[X:%.*]] to i128
+; CHECK-NEXT: [[B:%.*]] = call i128 @llvm.bswap.i128(i128 [[Z]])
+; CHECK-NEXT: [[S:%.*]] = lshr i128 [[B]], 61
; CHECK-NEXT: ret i128 [[S]]
;
%z = zext i96 %x to i128