[llvm] r311340 - revert r311333: [LibCallSimplifier] try harder to fold memcmp with constant arguments
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 21 08:16:25 PDT 2017
Author: spatel
Date: Mon Aug 21 08:16:25 2017
New Revision: 311340
URL: http://llvm.org/viewvc/llvm-project?rev=311340&view=rev
Log:
revert r311333: [LibCallSimplifier] try harder to fold memcmp with constant arguments
We're getting lots of compile-timeout bot failures like:
http://lab.llvm.org:8011/builders/clang-native-arm-lnt/builds/7119
http://lab.llvm.org:8011/builders/clang-cmake-x86_64-avx2-linux
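As a rough C sketch of the fold the reverted patch performed (it mirrors the removed test below; the function name is illustrative, not from the patch): when one memcmp operand is a constant global, that side folds to a known integer at compile time, so only the non-constant operand needs a load.

  #include <string.h>

  static const char charbuf[4] = {0, 0, 0, 1};

  int cmp_is_zero(const char *x) {
    /* With r311333 this reduced to roughly:
         (32-bit load of x) == 0x01000000 on little-endian targets
       (16777216, matching the removed test's LE check lines),
       regardless of the constant's alignment. */
    return memcmp(x, charbuf, 4) == 0;
  }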
Removed:
llvm/trunk/test/Transforms/InstCombine/memcmp-constant-fold.ll
Modified:
llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp
Modified: llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp?rev=311340&r1=311339&r2=311340&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp (original)
+++ llvm/trunk/lib/Transforms/Utils/SimplifyLibCalls.cpp Mon Aug 21 08:16:25 2017
@@ -18,7 +18,6 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/Triple.h"
-#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -752,44 +751,29 @@ Value *LibCallSimplifier::optimizeMemCmp
}
// memcmp(S1,S2,N/8)==0 -> (*(intN_t*)S1 != *(intN_t*)S2)==0
- // TODO: The case where both inputs are constants does not need to be limited
- // to legal integers or equality comparison. See block below this.
if (DL.isLegalInteger(Len * 8) && isOnlyUsedInZeroEqualityComparison(CI)) {
+
IntegerType *IntType = IntegerType::get(CI->getContext(), Len * 8);
unsigned PrefAlignment = DL.getPrefTypeAlignment(IntType);
- // First, see if we can fold either argument to a constant.
- Value *LHSV = nullptr;
- if (auto *LHSC = dyn_cast<Constant>(LHS)) {
- LHSC = ConstantExpr::getBitCast(LHSC, IntType->getPointerTo());
- LHSV = ConstantFoldLoadFromConstPtr(LHSC, IntType, DL);
- }
- Value *RHSV = nullptr;
- if (auto *RHSC = dyn_cast<Constant>(RHS)) {
- RHSC = ConstantExpr::getBitCast(RHSC, IntType->getPointerTo());
- RHSV = ConstantFoldLoadFromConstPtr(RHSC, IntType, DL);
- }
+ if (getKnownAlignment(LHS, DL, CI) >= PrefAlignment &&
+ getKnownAlignment(RHS, DL, CI) >= PrefAlignment) {
- // Don't generate unaligned loads. If either source is constant data,
- // alignment doesn't matter for that source because there is no load.
- if (!LHSV && getKnownAlignment(LHS, DL, CI) >= PrefAlignment) {
Type *LHSPtrTy =
IntType->getPointerTo(LHS->getType()->getPointerAddressSpace());
- LHSV = B.CreateLoad(B.CreateBitCast(LHS, LHSPtrTy), "lhsv");
- }
-
- if (!RHSV && getKnownAlignment(RHS, DL, CI) >= PrefAlignment) {
Type *RHSPtrTy =
IntType->getPointerTo(RHS->getType()->getPointerAddressSpace());
- RHSV = B.CreateLoad(B.CreateBitCast(RHS, RHSPtrTy), "rhsv");
- }
- if (LHSV && RHSV)
+ Value *LHSV =
+ B.CreateLoad(B.CreateBitCast(LHS, LHSPtrTy, "lhsc"), "lhsv");
+ Value *RHSV =
+ B.CreateLoad(B.CreateBitCast(RHS, RHSPtrTy, "rhsc"), "rhsv");
+
return B.CreateZExt(B.CreateICmpNE(LHSV, RHSV), CI->getType(), "memcmp");
+ }
}
- // Constant folding: memcmp(x, y, Len) -> constant (all arguments are const).
- // TODO: This is limited to i8 arrays.
+ // Constant folding: memcmp(x, y, l) -> cnst (all arguments are constant)
StringRef LHSStr, RHSStr;
if (getConstantStringInfo(LHS, LHSStr) &&
getConstantStringInfo(RHS, RHSStr)) {
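The transform that remains after this revert is the equality-only case noted in the comment above: when the length matches a legal integer width, the call's result is only compared against zero, and both operands meet the preferred alignment, the memcmp becomes a single wide load and compare. A minimal C sketch, with illustrative function names:

  #include <stdint.h>
  #include <string.h>

  /* before: memcmp used only in an equality-with-zero test */
  int equal4(const uint32_t *a, const uint32_t *b) {
    return memcmp(a, b, 4) == 0;
  }

  /* roughly what the simplifier produces instead: a 32-bit load of each
     operand, icmp ne, zext, then the existing compare against zero */
  int equal4_simplified(const uint32_t *a, const uint32_t *b) {
    return (*a != *b) == 0;
  }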
Removed: llvm/trunk/test/Transforms/InstCombine/memcmp-constant-fold.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/memcmp-constant-fold.ll?rev=311339&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/memcmp-constant-fold.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/memcmp-constant-fold.ll (removed)
@@ -1,65 +0,0 @@
-; RUN: opt < %s -instcombine -S -data-layout=e-n32 | FileCheck %s --check-prefix=ALL --check-prefix=LE
-; RUN: opt < %s -instcombine -S -data-layout=E-n32 | FileCheck %s --check-prefix=ALL --check-prefix=BE
-
-declare i32 @memcmp(i8*, i8*, i64)
-
-; The alignment of this constant does not matter. We constant fold the load.
-
-@charbuf = private unnamed_addr constant [4 x i8] [i8 0, i8 0, i8 0, i8 1], align 1
-
-define i1 @memcmp_4bytes_unaligned_constant_i8(i8* align 4 %x) {
-; LE-LABEL: @memcmp_4bytes_unaligned_constant_i8(
-; LE-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
-; LE-NEXT: [[LHSV:%.*]] = load i32, i32* [[TMP1]], align 4
-; LE-NEXT: [[TMP2:%.*]] = icmp eq i32 [[LHSV]], 16777216
-; LE-NEXT: ret i1 [[TMP2]]
-;
-; BE-LABEL: @memcmp_4bytes_unaligned_constant_i8(
-; BE-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
-; BE-NEXT: [[LHSV:%.*]] = load i32, i32* [[TMP1]], align 4
-; BE-NEXT: [[TMP2:%.*]] = icmp eq i32 [[LHSV]], 1
-; BE-NEXT: ret i1 [[TMP2]]
-;
- %call = tail call i32 @memcmp(i8* %x, i8* getelementptr inbounds ([4 x i8], [4 x i8]* @charbuf, i64 0, i64 0), i64 4)
- %cmpeq0 = icmp eq i32 %call, 0
- ret i1 %cmpeq0
-}
-
-; We still don't care about alignment of the constant. We are not limited to constant folding only i8 arrays.
-; It doesn't matter if the constant operand is the first operand to the memcmp.
-
-@intbuf_unaligned = private unnamed_addr constant [4 x i16] [i16 1, i16 2, i16 3, i16 4], align 1
-
-define i1 @memcmp_4bytes_unaligned_constant_i16(i8* align 4 %x) {
-; LE-LABEL: @memcmp_4bytes_unaligned_constant_i16(
-; LE-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
-; LE-NEXT: [[RHSV:%.*]] = load i32, i32* [[TMP1]], align 4
-; LE-NEXT: [[TMP2:%.*]] = icmp eq i32 [[RHSV]], 131073
-; LE-NEXT: ret i1 [[TMP2]]
-;
-; BE-LABEL: @memcmp_4bytes_unaligned_constant_i16(
-; BE-NEXT: [[TMP1:%.*]] = bitcast i8* %x to i32*
-; BE-NEXT: [[RHSV:%.*]] = load i32, i32* [[TMP1]], align 4
-; BE-NEXT: [[TMP2:%.*]] = icmp eq i32 [[RHSV]], 65538
-; BE-NEXT: ret i1 [[TMP2]]
-;
- %call = tail call i32 @memcmp(i8* bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @intbuf_unaligned, i64 0, i64 0) to i8*), i8* %x, i64 4)
- %cmpeq0 = icmp eq i32 %call, 0
- ret i1 %cmpeq0
-}
-
-; TODO: Any memcmp where all arguments are constants should be constant folded. Currently, we only handle i8 array constants.
-
-@intbuf = private unnamed_addr constant [2 x i32] [i32 0, i32 1], align 4
-
-define i1 @memcmp_3bytes_aligned_constant_i32(i8* align 4 %x) {
-; ALL-LABEL: @memcmp_3bytes_aligned_constant_i32(
-; ALL-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 1) to i8*), i8* bitcast ([2 x i32]* @intbuf to i8*), i64 3)
-; ALL-NEXT: [[CMPEQ0:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT: ret i1 [[CMPEQ0]]
-;
- %call = tail call i32 @memcmp(i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 1) to i8*), i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 0) to i8*), i64 3)
- %cmpeq0 = icmp eq i32 %call, 0
- ret i1 %cmpeq0
-}
-