[llvm] r364416 - Revert "r364412 [ExpandMemCmp][MergeICmps] Move passes out of CodeGen into opt pipeline."
Clement Courbet via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 26 05:13:14 PDT 2019
Author: courbet
Date: Wed Jun 26 05:13:13 2019
New Revision: 364416
URL: http://llvm.org/viewvc/llvm-project?rev=364416&view=rev
Log:
Revert "r364412 [ExpandMemCmp][MergeICmps] Move passes out of CodeGen into opt pipeline."
Breaks sanitizers:
libFuzzer :: cxxstring.test
libFuzzer :: memcmp.test
libFuzzer :: recommended-dictionary.test
libFuzzer :: strcmp.test
libFuzzer :: value-profile-mem.test
libFuzzer :: value-profile-strcmp.test
Added:
llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp
llvm/trunk/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
llvm/trunk/test/CodeGen/PowerPC/memcmp-mergeexpand.ll
llvm/trunk/test/CodeGen/PowerPC/memcmp.ll
llvm/trunk/test/CodeGen/PowerPC/memcmpIR.ll
llvm/trunk/test/CodeGen/X86/memcmp-mergeexpand.ll
llvm/trunk/test/CodeGen/X86/memcmp-optsize.ll
llvm/trunk/test/CodeGen/X86/memcmp.ll
Removed:
llvm/trunk/lib/Transforms/Scalar/ExpandMemCmp.cpp
llvm/trunk/test/Transforms/ExpandMemCmp/PowerPC/
llvm/trunk/test/Transforms/ExpandMemCmp/X86/pr36421.ll
llvm/trunk/test/Transforms/PhaseOrdering/PowerPC/
llvm/trunk/test/Transforms/PhaseOrdering/X86/
Modified:
llvm/trunk/include/llvm/CodeGen/Passes.h
llvm/trunk/include/llvm/Transforms/IPO/PassManagerBuilder.h
llvm/trunk/include/llvm/Transforms/Scalar.h
llvm/trunk/lib/CodeGen/CMakeLists.txt
llvm/trunk/lib/CodeGen/CodeGen.cpp
llvm/trunk/lib/CodeGen/TargetPassConfig.cpp
llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp
llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt
llvm/trunk/lib/Transforms/Scalar/MergeICmps.cpp
llvm/trunk/lib/Transforms/Scalar/Scalar.cpp
llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll
llvm/trunk/test/CodeGen/ARM/O3-pipeline.ll
llvm/trunk/test/CodeGen/Generic/llc-start-stop.ll
llvm/trunk/test/CodeGen/X86/O3-pipeline.ll
llvm/trunk/test/Other/opt-O2-pipeline.ll
llvm/trunk/test/Other/opt-O3-pipeline.ll
llvm/trunk/test/Other/opt-Os-pipeline.ll
llvm/trunk/test/Transforms/ExpandMemCmp/X86/memcmp.ll
llvm/trunk/tools/opt/opt.cpp
llvm/trunk/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
llvm/trunk/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn
Modified: llvm/trunk/include/llvm/CodeGen/Passes.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/Passes.h?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/Passes.h (original)
+++ llvm/trunk/include/llvm/CodeGen/Passes.h Wed Jun 26 05:13:13 2019
@@ -435,6 +435,9 @@ namespace llvm {
/// shuffles.
FunctionPass *createExpandReductionsPass();
+ // This pass expands memcmp() to load/stores.
+ FunctionPass *createExpandMemCmpPass();
+
/// Creates Break False Dependencies pass. \see BreakFalseDeps.cpp
FunctionPass *createBreakFalseDeps();
Modified: llvm/trunk/include/llvm/Transforms/IPO/PassManagerBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/IPO/PassManagerBuilder.h?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/IPO/PassManagerBuilder.h (original)
+++ llvm/trunk/include/llvm/Transforms/IPO/PassManagerBuilder.h Wed Jun 26 05:13:13 2019
@@ -195,7 +195,6 @@ private:
void addPGOInstrPasses(legacy::PassManagerBase &MPM, bool IsCS);
void addFunctionSimplificationPasses(legacy::PassManagerBase &MPM);
void addInstructionCombiningPass(legacy::PassManagerBase &MPM) const;
- void addMemcmpPasses(legacy::PassManagerBase &MPM) const;
public:
/// populateFunctionPassManager - This fills in the function pass manager,
Modified: llvm/trunk/include/llvm/Transforms/Scalar.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Transforms/Scalar.h?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Transforms/Scalar.h (original)
+++ llvm/trunk/include/llvm/Transforms/Scalar.h Wed Jun 26 05:13:13 2019
@@ -375,12 +375,6 @@ Pass *createMergeICmpsLegacyPass();
//===----------------------------------------------------------------------===//
//
-// ExpandMemCmp - This pass expands memcmp() to load/stores.
-//
-Pass *createExpandMemCmpPass();
-
-//===----------------------------------------------------------------------===//
-//
// ValuePropagation - Propagate CFG-derived value information
//
Pass *createCorrelatedValuePropagationPass();
Modified: llvm/trunk/lib/CodeGen/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/CMakeLists.txt?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/CMakeLists.txt (original)
+++ llvm/trunk/lib/CodeGen/CMakeLists.txt Wed Jun 26 05:13:13 2019
@@ -21,6 +21,7 @@ add_llvm_library(LLVMCodeGen
EarlyIfConversion.cpp
EdgeBundles.cpp
ExecutionDomainFix.cpp
+ ExpandMemCmp.cpp
ExpandPostRAPseudos.cpp
ExpandReductions.cpp
FaultMaps.cpp
Modified: llvm/trunk/lib/CodeGen/CodeGen.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/CodeGen.cpp?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/CodeGen.cpp (original)
+++ llvm/trunk/lib/CodeGen/CodeGen.cpp Wed Jun 26 05:13:13 2019
@@ -30,6 +30,7 @@ void llvm::initializeCodeGen(PassRegistr
initializeEarlyIfConverterPass(Registry);
initializeEarlyMachineLICMPass(Registry);
initializeEarlyTailDuplicatePass(Registry);
+ initializeExpandMemCmpPassPass(Registry);
initializeExpandPostRAPass(Registry);
initializeFEntryInserterPass(Registry);
initializeFinalizeISelPass(Registry);
Added: llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp?rev=364416&view=auto
==============================================================================
--- llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp (added)
+++ llvm/trunk/lib/CodeGen/ExpandMemCmp.cpp Wed Jun 26 05:13:13 2019
@@ -0,0 +1,871 @@
+//===--- ExpandMemCmp.cpp - Expand memcmp() to load/stores ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This pass tries to expand memcmp() calls into optimally-sized loads and
+// compares for the target.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/TargetLibraryInfo.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Analysis/ValueTracking.h"
+#include "llvm/CodeGen/TargetLowering.h"
+#include "llvm/CodeGen/TargetPassConfig.h"
+#include "llvm/CodeGen/TargetSubtargetInfo.h"
+#include "llvm/IR/IRBuilder.h"
+
+using namespace llvm;
+
+#define DEBUG_TYPE "expandmemcmp"
+
+STATISTIC(NumMemCmpCalls, "Number of memcmp calls");
+STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size");
+STATISTIC(NumMemCmpGreaterThanMax,
+ "Number of memcmp calls with size greater than max size");
+STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");
+
+static cl::opt<unsigned> MemCmpEqZeroNumLoadsPerBlock(
+ "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
+ cl::desc("The number of loads per basic block for inline expansion of "
+ "memcmp that is only being compared against zero."));
+
+static cl::opt<unsigned> MaxLoadsPerMemcmp(
+ "max-loads-per-memcmp", cl::Hidden,
+ cl::desc("Set maximum number of loads used in expanded memcmp"));
+
+static cl::opt<unsigned> MaxLoadsPerMemcmpOptSize(
+ "max-loads-per-memcmp-opt-size", cl::Hidden,
+ cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"));
+
+namespace {
+
+
+// This class provides helper functions to expand a memcmp library call into an
+// inline expansion.
+class MemCmpExpansion {
+ struct ResultBlock {
+ BasicBlock *BB = nullptr;
+ PHINode *PhiSrc1 = nullptr;
+ PHINode *PhiSrc2 = nullptr;
+
+ ResultBlock() = default;
+ };
+
+ CallInst *const CI;
+ ResultBlock ResBlock;
+ const uint64_t Size;
+ unsigned MaxLoadSize;
+ uint64_t NumLoadsNonOneByte;
+ const uint64_t NumLoadsPerBlockForZeroCmp;
+ std::vector<BasicBlock *> LoadCmpBlocks;
+ BasicBlock *EndBlock;
+ PHINode *PhiRes;
+ const bool IsUsedForZeroCmp;
+ const DataLayout &DL;
+ IRBuilder<> Builder;
+ // Represents the decomposition in blocks of the expansion. For example,
+ // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
+ // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}].
+ struct LoadEntry {
+ LoadEntry(unsigned LoadSize, uint64_t Offset)
+ : LoadSize(LoadSize), Offset(Offset) {
+ }
+
+ // The size of the load for this block, in bytes.
+ unsigned LoadSize;
+ // The offset of this load from the base pointer, in bytes.
+ uint64_t Offset;
+ };
+ using LoadEntryVector = SmallVector<LoadEntry, 8>;
+ LoadEntryVector LoadSequence;
+
+ void createLoadCmpBlocks();
+ void createResultBlock();
+ void setupResultBlockPHINodes();
+ void setupEndBlockPHINodes();
+ Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex);
+ void emitLoadCompareBlock(unsigned BlockIndex);
+ void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
+ unsigned &LoadIndex);
+ void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
+ void emitMemCmpResultBlock();
+ Value *getMemCmpExpansionZeroCase();
+ Value *getMemCmpEqZeroOneBlock();
+ Value *getMemCmpOneBlock();
+ Value *getPtrToElementAtOffset(Value *Source, Type *LoadSizeType,
+ uint64_t OffsetBytes);
+
+ static LoadEntryVector
+ computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
+ unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte);
+ static LoadEntryVector
+ computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
+ unsigned MaxNumLoads,
+ unsigned &NumLoadsNonOneByte);
+
+public:
+ MemCmpExpansion(CallInst *CI, uint64_t Size,
+ const TargetTransformInfo::MemCmpExpansionOptions &Options,
+ const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout);
+
+ unsigned getNumBlocks();
+ uint64_t getNumLoads() const { return LoadSequence.size(); }
+
+ Value *getMemCmpExpansion();
+};
+
+MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
+ uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
+ const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
+ NumLoadsNonOneByte = 0;
+ LoadEntryVector LoadSequence;
+ uint64_t Offset = 0;
+ while (Size && !LoadSizes.empty()) {
+ const unsigned LoadSize = LoadSizes.front();
+ const uint64_t NumLoadsForThisSize = Size / LoadSize;
+ if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
+ // Do not expand if the total number of loads is larger than what the
+ // target allows. Note that it's important that we exit before completing
+ // the expansion to avoid using a ton of memory to store the expansion for
+ // large sizes.
+ return {};
+ }
+ if (NumLoadsForThisSize > 0) {
+ for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
+ LoadSequence.push_back({LoadSize, Offset});
+ Offset += LoadSize;
+ }
+ if (LoadSize > 1)
+ ++NumLoadsNonOneByte;
+ Size = Size % LoadSize;
+ }
+ LoadSizes = LoadSizes.drop_front();
+ }
+ return LoadSequence;
+}
+
+MemCmpExpansion::LoadEntryVector
+MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
+ const unsigned MaxLoadSize,
+ const unsigned MaxNumLoads,
+ unsigned &NumLoadsNonOneByte) {
+ // These are already handled by the greedy approach.
+ if (Size < 2 || MaxLoadSize < 2)
+ return {};
+
+ // We try to do as many non-overlapping loads as possible starting from the
+ // beginning.
+ const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
+ assert(NumNonOverlappingLoads && "there must be at least one load");
+ // There remain 0 to (MaxLoadSize - 1) bytes to load, this will be done with
+ // an overlapping load.
+ Size = Size - NumNonOverlappingLoads * MaxLoadSize;
+ // Bail if we do not need an overlapping load; this is already handled by
+ // the greedy approach.
+ if (Size == 0)
+ return {};
+ // Bail if the number of loads (non-overlapping + potential overlapping one)
+ // is larger than the max allowed.
+ if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
+ return {};
+
+ // Add non-overlapping loads.
+ LoadEntryVector LoadSequence;
+ uint64_t Offset = 0;
+ for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
+ LoadSequence.push_back({MaxLoadSize, Offset});
+ Offset += MaxLoadSize;
+ }
+
+ // Add the last overlapping load.
+ assert(Size > 0 && Size < MaxLoadSize && "broken invariant");
+ LoadSequence.push_back({MaxLoadSize, Offset - (MaxLoadSize - Size)});
+ NumLoadsNonOneByte = 1;
+ return LoadSequence;
+}
+
+// Initialize the basic block structure required for expansion of memcmp call
+// with given maximum load size and memcmp size parameter.
+// This structure includes:
+// 1. A list of load compare blocks - LoadCmpBlocks.
+// 2. An EndBlock, split from original instruction point, which is the block to
+// return from.
+// 3. ResultBlock, block to branch to for early exit when a
+// LoadCmpBlock finds a difference.
+MemCmpExpansion::MemCmpExpansion(
+ CallInst *const CI, uint64_t Size,
+ const TargetTransformInfo::MemCmpExpansionOptions &Options,
+ const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout)
+ : CI(CI), Size(Size), MaxLoadSize(0), NumLoadsNonOneByte(0),
+ NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
+ IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), Builder(CI) {
+ assert(Size > 0 && "zero blocks");
+ // Scale the max size down if the target can load more bytes than we need.
+ llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
+ while (!LoadSizes.empty() && LoadSizes.front() > Size) {
+ LoadSizes = LoadSizes.drop_front();
+ }
+ assert(!LoadSizes.empty() && "cannot load Size bytes");
+ MaxLoadSize = LoadSizes.front();
+ // Compute the decomposition.
+ unsigned GreedyNumLoadsNonOneByte = 0;
+ LoadSequence = computeGreedyLoadSequence(Size, LoadSizes, Options.MaxNumLoads,
+ GreedyNumLoadsNonOneByte);
+ NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
+ assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
+ // If we allow overlapping loads and the load sequence is not already optimal,
+ // use overlapping loads.
+ if (Options.AllowOverlappingLoads &&
+ (LoadSequence.empty() || LoadSequence.size() > 2)) {
+ unsigned OverlappingNumLoadsNonOneByte = 0;
+ auto OverlappingLoads = computeOverlappingLoadSequence(
+ Size, MaxLoadSize, Options.MaxNumLoads, OverlappingNumLoadsNonOneByte);
+ if (!OverlappingLoads.empty() &&
+ (LoadSequence.empty() ||
+ OverlappingLoads.size() < LoadSequence.size())) {
+ LoadSequence = OverlappingLoads;
+ NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
+ }
+ }
+ assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
+}
+
+unsigned MemCmpExpansion::getNumBlocks() {
+ if (IsUsedForZeroCmp)
+ return getNumLoads() / NumLoadsPerBlockForZeroCmp +
+ (getNumLoads() % NumLoadsPerBlockForZeroCmp != 0 ? 1 : 0);
+ return getNumLoads();
+}
+
+void MemCmpExpansion::createLoadCmpBlocks() {
+ for (unsigned i = 0; i < getNumBlocks(); i++) {
+ BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
+ EndBlock->getParent(), EndBlock);
+ LoadCmpBlocks.push_back(BB);
+ }
+}
+
+void MemCmpExpansion::createResultBlock() {
+ ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
+ EndBlock->getParent(), EndBlock);
+}
+
+/// Return a pointer to an element of type `LoadSizeType` at offset
+/// `OffsetBytes`.
+Value *MemCmpExpansion::getPtrToElementAtOffset(Value *Source,
+ Type *LoadSizeType,
+ uint64_t OffsetBytes) {
+ if (OffsetBytes > 0) {
+ auto *ByteType = Type::getInt8Ty(CI->getContext());
+ Source = Builder.CreateGEP(
+ ByteType, Builder.CreateBitCast(Source, ByteType->getPointerTo()),
+ ConstantInt::get(ByteType, OffsetBytes));
+ }
+ return Builder.CreateBitCast(Source, LoadSizeType->getPointerTo());
+}
+
+// This function creates the IR instructions for loading and comparing 1 byte.
+// It loads 1 byte from each source of the memcmp parameters at the given
+// OffsetBytes. It then subtracts the two loaded values and adds this result to the
+// final phi node for selecting the memcmp result.
+void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
+ unsigned OffsetBytes) {
+ Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
+ Type *LoadSizeType = Type::getInt8Ty(CI->getContext());
+ Value *Source1 =
+ getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType, OffsetBytes);
+ Value *Source2 =
+ getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType, OffsetBytes);
+
+ Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
+ Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
+
+ LoadSrc1 = Builder.CreateZExt(LoadSrc1, Type::getInt32Ty(CI->getContext()));
+ LoadSrc2 = Builder.CreateZExt(LoadSrc2, Type::getInt32Ty(CI->getContext()));
+ Value *Diff = Builder.CreateSub(LoadSrc1, LoadSrc2);
+
+ PhiRes->addIncoming(Diff, LoadCmpBlocks[BlockIndex]);
+
+ if (BlockIndex < (LoadCmpBlocks.size() - 1)) {
+ // Early exit branch if difference found to EndBlock. Otherwise, continue to
+ // next LoadCmpBlock.
+ Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff,
+ ConstantInt::get(Diff->getType(), 0));
+ BranchInst *CmpBr =
+ BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp);
+ Builder.Insert(CmpBr);
+ } else {
+ // The last block has an unconditional branch to EndBlock.
+ BranchInst *CmpBr = BranchInst::Create(EndBlock);
+ Builder.Insert(CmpBr);
+ }
+}
+
+/// Generate an equality comparison for one or more pairs of loaded values.
+/// This is used in the case where the memcmp() call is compared equal or not
+/// equal to zero.
+Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex,
+ unsigned &LoadIndex) {
+ assert(LoadIndex < getNumLoads() &&
+ "getCompareLoadPairs() called with no remaining loads");
+ std::vector<Value *> XorList, OrList;
+ Value *Diff = nullptr;
+
+ const unsigned NumLoads =
+ std::min(getNumLoads() - LoadIndex, NumLoadsPerBlockForZeroCmp);
+
+ // For a single-block expansion, start inserting before the memcmp call.
+ if (LoadCmpBlocks.empty())
+ Builder.SetInsertPoint(CI);
+ else
+ Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
+
+ Value *Cmp = nullptr;
+ // If we have multiple loads per block, we need to generate a composite
+ // comparison using xor+or. The type for the combinations is the largest load
+ // type.
+ IntegerType *const MaxLoadType =
+ NumLoads == 1 ? nullptr
+ : IntegerType::get(CI->getContext(), MaxLoadSize * 8);
+ for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) {
+ const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex];
+
+ IntegerType *LoadSizeType =
+ IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
+
+ Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
+ CurLoadEntry.Offset);
+ Value *Source2 = getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType,
+ CurLoadEntry.Offset);
+
+ // Get a constant or load a value for each source address.
+ Value *LoadSrc1 = nullptr;
+ if (auto *Source1C = dyn_cast<Constant>(Source1))
+ LoadSrc1 = ConstantFoldLoadFromConstPtr(Source1C, LoadSizeType, DL);
+ if (!LoadSrc1)
+ LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
+
+ Value *LoadSrc2 = nullptr;
+ if (auto *Source2C = dyn_cast<Constant>(Source2))
+ LoadSrc2 = ConstantFoldLoadFromConstPtr(Source2C, LoadSizeType, DL);
+ if (!LoadSrc2)
+ LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
+
+ if (NumLoads != 1) {
+ if (LoadSizeType != MaxLoadType) {
+ LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType);
+ LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType);
+ }
+ // If we have multiple loads per block, we need to generate a composite
+ // comparison using xor+or.
+ Diff = Builder.CreateXor(LoadSrc1, LoadSrc2);
+ Diff = Builder.CreateZExt(Diff, MaxLoadType);
+ XorList.push_back(Diff);
+ } else {
+ // If there's only one load per block, we just compare the loaded values.
+ Cmp = Builder.CreateICmpNE(LoadSrc1, LoadSrc2);
+ }
+ }
+
+ auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> {
+ std::vector<Value *> OutList;
+ for (unsigned i = 0; i < InList.size() - 1; i = i + 2) {
+ Value *Or = Builder.CreateOr(InList[i], InList[i + 1]);
+ OutList.push_back(Or);
+ }
+ if (InList.size() % 2 != 0)
+ OutList.push_back(InList.back());
+ return OutList;
+ };
+
+ if (!Cmp) {
+ // Pairwise OR the XOR results.
+ OrList = pairWiseOr(XorList);
+
+ // Pairwise OR the OR results until one result left.
+ while (OrList.size() != 1) {
+ OrList = pairWiseOr(OrList);
+ }
+
+ assert(Diff && "Failed to find comparison diff");
+ Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0));
+ }
+
+ return Cmp;
+}
+
+void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
+ unsigned &LoadIndex) {
+ Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex);
+
+ BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
+ ? EndBlock
+ : LoadCmpBlocks[BlockIndex + 1];
+ // Early exit branch if difference found to ResultBlock. Otherwise,
+ // continue to next LoadCmpBlock or EndBlock.
+ BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp);
+ Builder.Insert(CmpBr);
+
+ // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
+ // since early exit to ResultBlock was not taken (no difference was found in
+ // any of the bytes).
+ if (BlockIndex == LoadCmpBlocks.size() - 1) {
+ Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
+ PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
+ }
+}
+
+// This function creates the IR instructions for loading and comparing using the
+// given LoadSize. It loads the number of bytes specified by LoadSize from each
+// source of the memcmp parameters. It then does a subtract to see if there was
+// a difference in the loaded values. If a difference is found, it branches
+// with an early exit to the ResultBlock for calculating which source was
+// larger. Otherwise, it falls through to either the next LoadCmpBlock or
+// the EndBlock if this is the last LoadCmpBlock. Loading 1 byte is handled with
+// a special case through emitLoadCompareByteBlock. The special handling can
+// simply subtract the loaded values and add it to the result phi node.
+void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
+ // There is one load per block in this case, BlockIndex == LoadIndex.
+ const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];
+
+ if (CurLoadEntry.LoadSize == 1) {
+ MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, CurLoadEntry.Offset);
+ return;
+ }
+
+ Type *LoadSizeType =
+ IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
+ Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
+ assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");
+
+ Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
+
+ Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
+ CurLoadEntry.Offset);
+ Value *Source2 = getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType,
+ CurLoadEntry.Offset);
+
+ // Load LoadSizeType from the base address.
+ Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
+ Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
+
+ if (DL.isLittleEndian()) {
+ Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
+ Intrinsic::bswap, LoadSizeType);
+ LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1);
+ LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2);
+ }
+
+ if (LoadSizeType != MaxLoadType) {
+ LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType);
+ LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType);
+ }
+
+ // Add the loaded values to the phi nodes for calculating memcmp result only
+ // if result is not used in a zero equality.
+ if (!IsUsedForZeroCmp) {
+ ResBlock.PhiSrc1->addIncoming(LoadSrc1, LoadCmpBlocks[BlockIndex]);
+ ResBlock.PhiSrc2->addIncoming(LoadSrc2, LoadCmpBlocks[BlockIndex]);
+ }
+
+ Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, LoadSrc1, LoadSrc2);
+ BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
+ ? EndBlock
+ : LoadCmpBlocks[BlockIndex + 1];
+ // Early exit branch if difference found to ResultBlock. Otherwise, continue
+ // to next LoadCmpBlock or EndBlock.
+ BranchInst *CmpBr = BranchInst::Create(NextBB, ResBlock.BB, Cmp);
+ Builder.Insert(CmpBr);
+
+ // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
+ // since early exit to ResultBlock was not taken (no difference was found in
+ // any of the bytes).
+ if (BlockIndex == LoadCmpBlocks.size() - 1) {
+ Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
+ PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
+ }
+}
+
+// This function populates the ResultBlock with a sequence to calculate the
+// memcmp result. It compares the two loaded source values and returns -1 if
+// src1 < src2 and 1 if src1 > src2.
+void MemCmpExpansion::emitMemCmpResultBlock() {
+ // Special case: if memcmp result is used in a zero equality, result does not
+ // need to be calculated and can simply return 1.
+ if (IsUsedForZeroCmp) {
+ BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
+ Builder.SetInsertPoint(ResBlock.BB, InsertPt);
+ Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
+ PhiRes->addIncoming(Res, ResBlock.BB);
+ BranchInst *NewBr = BranchInst::Create(EndBlock);
+ Builder.Insert(NewBr);
+ return;
+ }
+ BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
+ Builder.SetInsertPoint(ResBlock.BB, InsertPt);
+
+ Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
+ ResBlock.PhiSrc2);
+
+ Value *Res =
+ Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1),
+ ConstantInt::get(Builder.getInt32Ty(), 1));
+
+ BranchInst *NewBr = BranchInst::Create(EndBlock);
+ Builder.Insert(NewBr);
+ PhiRes->addIncoming(Res, ResBlock.BB);
+}
+
+void MemCmpExpansion::setupResultBlockPHINodes() {
+ Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
+ Builder.SetInsertPoint(ResBlock.BB);
+ // Note: this assumes one load per block.
+ ResBlock.PhiSrc1 =
+ Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1");
+ ResBlock.PhiSrc2 =
+ Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2");
+}
+
+void MemCmpExpansion::setupEndBlockPHINodes() {
+ Builder.SetInsertPoint(&EndBlock->front());
+ PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
+}
+
+Value *MemCmpExpansion::getMemCmpExpansionZeroCase() {
+ unsigned LoadIndex = 0;
+ // This loop populates each of the LoadCmpBlocks with the IR sequence to
+ // handle multiple loads per block.
+ for (unsigned I = 0; I < getNumBlocks(); ++I) {
+ emitLoadCompareBlockMultipleLoads(I, LoadIndex);
+ }
+
+ emitMemCmpResultBlock();
+ return PhiRes;
+}
+
+/// A memcmp expansion that compares equality with 0 and only has one block of
+/// load and compare can bypass the compare, branch, and phi IR that is required
+/// in the general case.
+Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() {
+ unsigned LoadIndex = 0;
+ Value *Cmp = getCompareLoadPairs(0, LoadIndex);
+ assert(LoadIndex == getNumLoads() && "some entries were not consumed");
+ return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext()));
+}
+
+/// A memcmp expansion that only has one block of load and compare can bypass
+/// the compare, branch, and phi IR that is required in the general case.
+Value *MemCmpExpansion::getMemCmpOneBlock() {
+ Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8);
+ Value *Source1 = CI->getArgOperand(0);
+ Value *Source2 = CI->getArgOperand(1);
+
+ // Cast source to LoadSizeType*.
+ if (Source1->getType() != LoadSizeType)
+ Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo());
+ if (Source2->getType() != LoadSizeType)
+ Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo());
+
+ // Load LoadSizeType from the base address.
+ Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
+ Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
+
+ if (DL.isLittleEndian() && Size != 1) {
+ Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
+ Intrinsic::bswap, LoadSizeType);
+ LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1);
+ LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2);
+ }
+
+ if (Size < 4) {
+ // The i8 and i16 cases don't need compares. We zext the loaded values and
+ // subtract them to get the suitable negative, zero, or positive i32 result.
+ LoadSrc1 = Builder.CreateZExt(LoadSrc1, Builder.getInt32Ty());
+ LoadSrc2 = Builder.CreateZExt(LoadSrc2, Builder.getInt32Ty());
+ return Builder.CreateSub(LoadSrc1, LoadSrc2);
+ }
+
+ // The result of memcmp is negative, zero, or positive, so produce that by
+ // subtracting 2 extended compare bits: sub (ugt, ult).
+ // If a target prefers to use selects to get -1/0/1, they should be able
+ // to transform this later. The inverse transform (going from selects to math)
+ // may not be possible in the DAG because the selects got converted into
+ // branches before we got there.
+ Value *CmpUGT = Builder.CreateICmpUGT(LoadSrc1, LoadSrc2);
+ Value *CmpULT = Builder.CreateICmpULT(LoadSrc1, LoadSrc2);
+ Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty());
+ Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty());
+ return Builder.CreateSub(ZextUGT, ZextULT);
+}
+
+// This function expands the memcmp call into an inline expansion and returns
+// the memcmp result.
+Value *MemCmpExpansion::getMemCmpExpansion() {
+ // Create the basic block framework for a multi-block expansion.
+ if (getNumBlocks() != 1) {
+ BasicBlock *StartBlock = CI->getParent();
+ EndBlock = StartBlock->splitBasicBlock(CI, "endblock");
+ setupEndBlockPHINodes();
+ createResultBlock();
+
+ // If return value of memcmp is not used in a zero equality, we need to
+ // calculate which source was larger. The calculation requires the
+ // two loaded source values of each load compare block.
+ // These will be saved in the phi nodes created by setupResultBlockPHINodes.
+ if (!IsUsedForZeroCmp) setupResultBlockPHINodes();
+
+ // Create the number of required load compare basic blocks.
+ createLoadCmpBlocks();
+
+ // Update the terminator added by splitBasicBlock to branch to the first
+ // LoadCmpBlock.
+ StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]);
+ }
+
+ Builder.SetCurrentDebugLocation(CI->getDebugLoc());
+
+ if (IsUsedForZeroCmp)
+ return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock()
+ : getMemCmpExpansionZeroCase();
+
+ if (getNumBlocks() == 1)
+ return getMemCmpOneBlock();
+
+ for (unsigned I = 0; I < getNumBlocks(); ++I) {
+ emitLoadCompareBlock(I);
+ }
+
+ emitMemCmpResultBlock();
+ return PhiRes;
+}
+
+// This function checks to see if an expansion of memcmp can be generated.
+// It checks for constant compare size that is less than the max inline size.
+// If an expansion cannot occur, returns false to leave as a library call.
+// Otherwise, the library call is replaced with a new IR instruction sequence.
+/// We want to transform:
+/// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15)
+/// To:
+/// loadbb:
+/// %0 = bitcast i32* %buffer2 to i8*
+/// %1 = bitcast i32* %buffer1 to i8*
+/// %2 = bitcast i8* %1 to i64*
+/// %3 = bitcast i8* %0 to i64*
+/// %4 = load i64, i64* %2
+/// %5 = load i64, i64* %3
+/// %6 = call i64 @llvm.bswap.i64(i64 %4)
+/// %7 = call i64 @llvm.bswap.i64(i64 %5)
+/// %8 = sub i64 %6, %7
+/// %9 = icmp ne i64 %8, 0
+/// br i1 %9, label %res_block, label %loadbb1
+/// res_block: ; preds = %loadbb2,
+/// %loadbb1, %loadbb
+/// %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ]
+/// %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ]
+/// %10 = icmp ult i64 %phi.src1, %phi.src2
+/// %11 = select i1 %10, i32 -1, i32 1
+/// br label %endblock
+/// loadbb1: ; preds = %loadbb
+/// %12 = bitcast i32* %buffer2 to i8*
+/// %13 = bitcast i32* %buffer1 to i8*
+/// %14 = bitcast i8* %13 to i32*
+/// %15 = bitcast i8* %12 to i32*
+/// %16 = getelementptr i32, i32* %14, i32 2
+/// %17 = getelementptr i32, i32* %15, i32 2
+/// %18 = load i32, i32* %16
+/// %19 = load i32, i32* %17
+/// %20 = call i32 @llvm.bswap.i32(i32 %18)
+/// %21 = call i32 @llvm.bswap.i32(i32 %19)
+/// %22 = zext i32 %20 to i64
+/// %23 = zext i32 %21 to i64
+/// %24 = sub i64 %22, %23
+/// %25 = icmp ne i64 %24, 0
+/// br i1 %25, label %res_block, label %loadbb2
+/// loadbb2: ; preds = %loadbb1
+/// %26 = bitcast i32* %buffer2 to i8*
+/// %27 = bitcast i32* %buffer1 to i8*
+/// %28 = bitcast i8* %27 to i16*
+/// %29 = bitcast i8* %26 to i16*
+/// %30 = getelementptr i16, i16* %28, i16 6
+/// %31 = getelementptr i16, i16* %29, i16 6
+/// %32 = load i16, i16* %30
+/// %33 = load i16, i16* %31
+/// %34 = call i16 @llvm.bswap.i16(i16 %32)
+/// %35 = call i16 @llvm.bswap.i16(i16 %33)
+/// %36 = zext i16 %34 to i64
+/// %37 = zext i16 %35 to i64
+/// %38 = sub i64 %36, %37
+/// %39 = icmp ne i64 %38, 0
+/// br i1 %39, label %res_block, label %loadbb3
+/// loadbb3: ; preds = %loadbb2
+/// %40 = bitcast i32* %buffer2 to i8*
+/// %41 = bitcast i32* %buffer1 to i8*
+/// %42 = getelementptr i8, i8* %41, i8 14
+/// %43 = getelementptr i8, i8* %40, i8 14
+/// %44 = load i8, i8* %42
+/// %45 = load i8, i8* %43
+/// %46 = zext i8 %44 to i32
+/// %47 = zext i8 %45 to i32
+/// %48 = sub i32 %46, %47
+/// br label %endblock
+/// endblock: ; preds = %res_block,
+/// %loadbb3
+/// %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
+/// ret i32 %phi.res
+static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
+ const TargetLowering *TLI, const DataLayout *DL) {
+ NumMemCmpCalls++;
+
+ // Early exit from expansion if -Oz.
+ if (CI->getFunction()->hasMinSize())
+ return false;
+
+ // Early exit from expansion if size is not a constant.
+ ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2));
+ if (!SizeCast) {
+ NumMemCmpNotConstant++;
+ return false;
+ }
+ const uint64_t SizeVal = SizeCast->getZExtValue();
+
+ if (SizeVal == 0) {
+ return false;
+ }
+ // TTI call to check if target would like to expand memcmp. Also, get the
+ // available load sizes.
+ const bool IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI);
+ auto Options = TTI->enableMemCmpExpansion(CI->getFunction()->hasOptSize(),
+ IsUsedForZeroCmp);
+ if (!Options) return false;
+
+ if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
+ Options.NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock;
+
+ if (CI->getFunction()->hasOptSize() &&
+ MaxLoadsPerMemcmpOptSize.getNumOccurrences())
+ Options.MaxNumLoads = MaxLoadsPerMemcmpOptSize;
+
+ if (!CI->getFunction()->hasOptSize() && MaxLoadsPerMemcmp.getNumOccurrences())
+ Options.MaxNumLoads = MaxLoadsPerMemcmp;
+
+ MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL);
+
+ // Don't expand if this will require more loads than desired by the target.
+ if (Expansion.getNumLoads() == 0) {
+ NumMemCmpGreaterThanMax++;
+ return false;
+ }
+
+ NumMemCmpInlined++;
+
+ Value *Res = Expansion.getMemCmpExpansion();
+
+ // Replace call with result of expansion and erase call.
+ CI->replaceAllUsesWith(Res);
+ CI->eraseFromParent();
+
+ return true;
+}
+
+
+
+class ExpandMemCmpPass : public FunctionPass {
+public:
+ static char ID;
+
+ ExpandMemCmpPass() : FunctionPass(ID) {
+ initializeExpandMemCmpPassPass(*PassRegistry::getPassRegistry());
+ }
+
+ bool runOnFunction(Function &F) override {
+ if (skipFunction(F)) return false;
+
+ auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
+ if (!TPC) {
+ return false;
+ }
+ const TargetLowering* TL =
+ TPC->getTM<TargetMachine>().getSubtargetImpl(F)->getTargetLowering();
+
+ const TargetLibraryInfo *TLI =
+ &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
+ const TargetTransformInfo *TTI =
+ &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
+ auto PA = runImpl(F, TLI, TTI, TL);
+ return !PA.areAllPreserved();
+ }
+
+private:
+ void getAnalysisUsage(AnalysisUsage &AU) const override {
+ AU.addRequired<TargetLibraryInfoWrapperPass>();
+ AU.addRequired<TargetTransformInfoWrapperPass>();
+ FunctionPass::getAnalysisUsage(AU);
+ }
+
+ PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
+ const TargetTransformInfo *TTI,
+ const TargetLowering* TL);
+ // Returns true if a change was made.
+ bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
+ const TargetTransformInfo *TTI, const TargetLowering* TL,
+ const DataLayout& DL);
+};
+
+bool ExpandMemCmpPass::runOnBlock(
+ BasicBlock &BB, const TargetLibraryInfo *TLI,
+ const TargetTransformInfo *TTI, const TargetLowering* TL,
+ const DataLayout& DL) {
+ for (Instruction& I : BB) {
+ CallInst *CI = dyn_cast<CallInst>(&I);
+ if (!CI) {
+ continue;
+ }
+ LibFunc Func;
+ if (TLI->getLibFunc(ImmutableCallSite(CI), Func) &&
+ (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
+ expandMemCmp(CI, TTI, TL, &DL)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
+PreservedAnalyses ExpandMemCmpPass::runImpl(
+ Function &F, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI,
+ const TargetLowering* TL) {
+ const DataLayout& DL = F.getParent()->getDataLayout();
+ bool MadeChanges = false;
+ for (auto BBIt = F.begin(); BBIt != F.end();) {
+ if (runOnBlock(*BBIt, TLI, TTI, TL, DL)) {
+ MadeChanges = true;
+ // If changes were made, restart the function from the beginning, since
+ // the structure of the function was changed.
+ BBIt = F.begin();
+ } else {
+ ++BBIt;
+ }
+ }
+ return MadeChanges ? PreservedAnalyses::none() : PreservedAnalyses::all();
+}
+
+} // namespace
+
+char ExpandMemCmpPass::ID = 0;
+INITIALIZE_PASS_BEGIN(ExpandMemCmpPass, "expandmemcmp",
+ "Expand memcmp() to load/stores", false, false)
+INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
+INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
+INITIALIZE_PASS_END(ExpandMemCmpPass, "expandmemcmp",
+ "Expand memcmp() to load/stores", false, false)
+
+FunctionPass *llvm::createExpandMemCmpPass() {
+ return new ExpandMemCmpPass();
+}
Modified: llvm/trunk/lib/CodeGen/TargetPassConfig.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/TargetPassConfig.cpp?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/TargetPassConfig.cpp (original)
+++ llvm/trunk/lib/CodeGen/TargetPassConfig.cpp Wed Jun 26 05:13:13 2019
@@ -99,6 +99,9 @@ static cl::opt<bool> EnableImplicitNullC
"enable-implicit-null-checks",
cl::desc("Fold null checks into faulting memory operations"),
cl::init(false), cl::Hidden);
+static cl::opt<bool> DisableMergeICmps("disable-mergeicmps",
+ cl::desc("Disable MergeICmps Pass"),
+ cl::init(false), cl::Hidden);
static cl::opt<bool> PrintLSR("print-lsr-output", cl::Hidden,
cl::desc("Print LLVM IR produced by the loop-reduce pass"));
static cl::opt<bool> PrintISelInput("print-isel-input", cl::Hidden,
@@ -637,6 +640,16 @@ void TargetPassConfig::addIRPasses() {
addPass(createPrintFunctionPass(dbgs(), "\n\n*** Code after LSR ***\n"));
}
+ if (getOptLevel() != CodeGenOpt::None) {
+ // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
+ // loads and compares. ExpandMemCmpPass then tries to expand those calls
+ // into optimally-sized loads and compares. The transforms are enabled by a
+ // target lowering hook.
+ if (!DisableMergeICmps)
+ addPass(createMergeICmpsLegacyPass());
+ addPass(createExpandMemCmpPass());
+ }
+
// Run GC lowering passes for builtin collectors
// TODO: add a pass insertion point here
addPass(createGCLoweringPass());
Modified: llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp (original)
+++ llvm/trunk/lib/Transforms/IPO/PassManagerBuilder.cpp Wed Jun 26 05:13:13 2019
@@ -246,18 +246,6 @@ void PassManagerBuilder::addInstructionC
PM.add(createInstructionCombiningPass(ExpensiveCombines));
}
-void PassManagerBuilder::addMemcmpPasses(legacy::PassManagerBase &PM) const {
- if (OptLevel > 0) {
- // The MergeICmpsPass tries to create memcmp calls by grouping sequences of
- // loads and compares. ExpandMemCmpPass then tries to expand those calls
- // into optimally-sized loads and compares. The transforms are enabled by a
- // target transform info hook.
- PM.add(createMergeICmpsLegacyPass());
- PM.add(createExpandMemCmpPass());
- PM.add(createEarlyCSEPass());
- }
-}
-
void PassManagerBuilder::populateFunctionPassManager(
legacy::FunctionPassManager &FPM) {
addExtensionsToPM(EP_EarlyAsPossible, FPM);
@@ -403,7 +391,6 @@ void PassManagerBuilder::addFunctionSimp
: createGVNPass(DisableGVNLoadPRE)); // Remove redundancies
}
MPM.add(createMemCpyOptPass()); // Remove memcpy / form memset
- addMemcmpPasses(MPM); // Merge/Expand comparisons.
MPM.add(createSCCPPass()); // Constant prop with SCCP
// Delete dead bit computations (instcombine runs after to fold away the dead
@@ -923,7 +910,6 @@ void PassManagerBuilder::addLTOOptimizat
PM.add(NewGVN ? createNewGVNPass()
: createGVNPass(DisableGVNLoadPRE)); // Remove redundancies.
PM.add(createMemCpyOptPass()); // Remove dead memcpys.
- addMemcmpPasses(PM); // Merge/Expand comparisons.
// Nuke dead stores.
PM.add(createDeadStoreEliminationPass());
Modified: llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt (original)
+++ llvm/trunk/lib/Transforms/Scalar/CMakeLists.txt Wed Jun 26 05:13:13 2019
@@ -10,7 +10,6 @@ add_llvm_library(LLVMScalarOpts
DeadStoreElimination.cpp
DivRemPairs.cpp
EarlyCSE.cpp
- ExpandMemCmp.cpp
FlattenCFGPass.cpp
Float2Int.cpp
GuardWidening.cpp
Removed: llvm/trunk/lib/Transforms/Scalar/ExpandMemCmp.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/ExpandMemCmp.cpp?rev=364415&view=auto
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/ExpandMemCmp.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/ExpandMemCmp.cpp (removed)
@@ -1,895 +0,0 @@
-//===--- ExpandMemCmp.cpp - Expand memcmp() to load/stores ----------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// This pass tries to expand memcmp() calls into optimally-sized loads and
-// compares for the target.
-//
-//===----------------------------------------------------------------------===//
-
-#include "llvm/ADT/Statistic.h"
-#include "llvm/Analysis/ConstantFolding.h"
-#include "llvm/Analysis/DomTreeUpdater.h"
-#include "llvm/Analysis/GlobalsModRef.h"
-#include "llvm/Analysis/TargetLibraryInfo.h"
-#include "llvm/Analysis/TargetTransformInfo.h"
-#include "llvm/Analysis/ValueTracking.h"
-#include "llvm/CodeGen/TargetSubtargetInfo.h"
-#include "llvm/IR/Dominators.h"
-#include "llvm/IR/IRBuilder.h"
-#include "llvm/Transforms/Scalar.h"
-
-using namespace llvm;
-
-#define DEBUG_TYPE "expandmemcmp"
-
-STATISTIC(NumMemCmpCalls, "Number of memcmp calls");
-STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size");
-STATISTIC(NumMemCmpGreaterThanMax,
- "Number of memcmp calls with size greater than max size");
-STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");
-
-static cl::opt<unsigned> MemCmpEqZeroNumLoadsPerBlock(
- "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
- cl::desc("The number of loads per basic block for inline expansion of "
- "memcmp that is only being compared against zero."));
-
-static cl::opt<unsigned> MaxLoadsPerMemcmp(
- "max-loads-per-memcmp", cl::Hidden,
- cl::desc("Set maximum number of loads used in expanded memcmp"));
-
-static cl::opt<unsigned> MaxLoadsPerMemcmpOptSize(
- "max-loads-per-memcmp-opt-size", cl::Hidden,
- cl::desc("Set maximum number of loads used in expanded memcmp for -Os/Oz"));
-
-namespace {
-
-// This class provides helper functions to expand a memcmp library call into an
-// inline expansion.
-class MemCmpExpansion {
- struct ResultBlock {
- BasicBlock *BB = nullptr;
- PHINode *PhiSrc1 = nullptr;
- PHINode *PhiSrc2 = nullptr;
-
- ResultBlock() = default;
- };
-
- CallInst *const CI;
- ResultBlock ResBlock;
- const uint64_t Size;
- unsigned MaxLoadSize;
- uint64_t NumLoadsNonOneByte;
- const uint64_t NumLoadsPerBlockForZeroCmp;
- std::vector<BasicBlock *> LoadCmpBlocks;
- BasicBlock *EndBlock = nullptr;
- PHINode *PhiRes;
- const bool IsUsedForZeroCmp;
- const DataLayout &DL;
- IRBuilder<> Builder;
- DomTreeUpdater DTU;
- // Represents the decomposition in blocks of the expansion. For example,
- // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
- // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}].
- struct LoadEntry {
- LoadEntry(unsigned LoadSize, uint64_t Offset)
- : LoadSize(LoadSize), Offset(Offset) {}
-
- // The size of the load for this block, in bytes.
- unsigned LoadSize;
- // The offset of this load from the base pointer, in bytes.
- uint64_t Offset;
- };
- using LoadEntryVector = SmallVector<LoadEntry, 8>;
- LoadEntryVector LoadSequence;
-
- void createLoadCmpBlocks();
- void createResultBlock();
- void setupResultBlockPHINodes();
- void setupEndBlockPHINodes();
- Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex);
- void emitLoadCompareBlock(unsigned BlockIndex);
- void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
- unsigned &LoadIndex);
- void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
- void emitMemCmpResultBlock();
- Value *getMemCmpExpansionZeroCase();
- Value *getMemCmpEqZeroOneBlock();
- Value *getMemCmpOneBlock();
- Value *getPtrToElementAtOffset(Value *Source, Type *LoadSizeType,
- uint64_t OffsetBytes);
-
- static LoadEntryVector
- computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
- unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte);
- static LoadEntryVector
- computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
- unsigned MaxNumLoads,
- unsigned &NumLoadsNonOneByte);
-
-public:
- MemCmpExpansion(CallInst *CI, uint64_t Size,
- const TargetTransformInfo::MemCmpExpansionOptions &Options,
- const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
- DominatorTree *DT);
-
- unsigned getNumBlocks();
- uint64_t getNumLoads() const { return LoadSequence.size(); }
-
- Value *getMemCmpExpansion();
-};
-
-MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
- uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
- const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
- NumLoadsNonOneByte = 0;
- LoadEntryVector LoadSequence;
- uint64_t Offset = 0;
- while (Size && !LoadSizes.empty()) {
- const unsigned LoadSize = LoadSizes.front();
- const uint64_t NumLoadsForThisSize = Size / LoadSize;
- if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
- // Do not expand if the total number of loads is larger than what the
- // target allows. Note that it's important that we exit before completing
- // the expansion to avoid using a ton of memory to store the expansion for
- // large sizes.
- return {};
- }
- if (NumLoadsForThisSize > 0) {
- for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
- LoadSequence.push_back({LoadSize, Offset});
- Offset += LoadSize;
- }
- if (LoadSize > 1)
- ++NumLoadsNonOneByte;
- Size = Size % LoadSize;
- }
- LoadSizes = LoadSizes.drop_front();
- }
- return LoadSequence;
-}
-
-MemCmpExpansion::LoadEntryVector
-MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
- const unsigned MaxLoadSize,
- const unsigned MaxNumLoads,
- unsigned &NumLoadsNonOneByte) {
- // These are already handled by the greedy approach.
- if (Size < 2 || MaxLoadSize < 2)
- return {};
-
- // We try to do as many non-overlapping loads as possible starting from the
- // beginning.
- const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
- assert(NumNonOverlappingLoads && "there must be at least one load");
- // There remain 0 to (MaxLoadSize - 1) bytes to load, this will be done with
- // an overlapping load.
- Size = Size - NumNonOverlappingLoads * MaxLoadSize;
- // Bail if we do not need an overlapping load; this is already handled by
- // the greedy approach.
- if (Size == 0)
- return {};
- // Bail if the number of loads (non-overlapping + potential overlapping one)
- // is larger than the max allowed.
- if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
- return {};
-
- // Add non-overlapping loads.
- LoadEntryVector LoadSequence;
- uint64_t Offset = 0;
- for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
- LoadSequence.push_back({MaxLoadSize, Offset});
- Offset += MaxLoadSize;
- }
-
- // Add the last overlapping load.
- assert(Size > 0 && Size < MaxLoadSize && "broken invariant");
- LoadSequence.push_back({MaxLoadSize, Offset - (MaxLoadSize - Size)});
- NumLoadsNonOneByte = 1;
- return LoadSequence;
-}
-
-// Initialize the basic block structure required for expansion of memcmp call
-// with given maximum load size and memcmp size parameter.
-// This structure includes:
-// 1. A list of load compare blocks - LoadCmpBlocks.
-// 2. An EndBlock, split from original instruction point, which is the block to
-// return from.
-// 3. ResultBlock, block to branch to for early exit when a
-// LoadCmpBlock finds a difference.
-MemCmpExpansion::MemCmpExpansion(
- CallInst *const CI, uint64_t Size,
- const TargetTransformInfo::MemCmpExpansionOptions &Options,
- const bool IsUsedForZeroCmp, const DataLayout &TheDataLayout,
- DominatorTree *DT)
- : CI(CI), Size(Size), MaxLoadSize(0), NumLoadsNonOneByte(0),
- NumLoadsPerBlockForZeroCmp(Options.NumLoadsPerBlock),
- IsUsedForZeroCmp(IsUsedForZeroCmp), DL(TheDataLayout), Builder(CI),
- DTU(DT, /*PostDominator*/ nullptr,
- DomTreeUpdater::UpdateStrategy::Eager) {
- assert(Size > 0 && "zero blocks");
- // Scale the max size down if the target can load more bytes than we need.
- llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
- while (!LoadSizes.empty() && LoadSizes.front() > Size) {
- LoadSizes = LoadSizes.drop_front();
- }
- assert(!LoadSizes.empty() && "cannot load Size bytes");
- MaxLoadSize = LoadSizes.front();
- // Compute the decomposition.
- unsigned GreedyNumLoadsNonOneByte = 0;
- LoadSequence = computeGreedyLoadSequence(Size, LoadSizes, Options.MaxNumLoads,
- GreedyNumLoadsNonOneByte);
- NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
- assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
- // If we allow overlapping loads and the load sequence is not already optimal,
- // use overlapping loads.
- if (Options.AllowOverlappingLoads &&
- (LoadSequence.empty() || LoadSequence.size() > 2)) {
- unsigned OverlappingNumLoadsNonOneByte = 0;
- auto OverlappingLoads = computeOverlappingLoadSequence(
- Size, MaxLoadSize, Options.MaxNumLoads, OverlappingNumLoadsNonOneByte);
- if (!OverlappingLoads.empty() &&
- (LoadSequence.empty() ||
- OverlappingLoads.size() < LoadSequence.size())) {
- LoadSequence = OverlappingLoads;
- NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
- }
- }
- assert(LoadSequence.size() <= Options.MaxNumLoads && "broken invariant");
-}
-
-unsigned MemCmpExpansion::getNumBlocks() {
- if (IsUsedForZeroCmp)
- return getNumLoads() / NumLoadsPerBlockForZeroCmp +
- (getNumLoads() % NumLoadsPerBlockForZeroCmp != 0 ? 1 : 0);
- return getNumLoads();
-}
-
-void MemCmpExpansion::createLoadCmpBlocks() {
- assert(ResBlock.BB && "ResBlock must be created before LoadCmpBlocks");
- for (unsigned i = 0; i < getNumBlocks(); i++) {
- BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
- EndBlock->getParent(), EndBlock);
- LoadCmpBlocks.push_back(BB);
- }
-}
-
-void MemCmpExpansion::createResultBlock() {
- assert(EndBlock && "EndBlock must be created before ResultBlock");
- ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
- EndBlock->getParent(), EndBlock);
-}
-
-/// Return a pointer to an element of type `LoadSizeType` at offset
-/// `OffsetBytes`.
-Value *MemCmpExpansion::getPtrToElementAtOffset(Value *Source,
- Type *LoadSizeType,
- uint64_t OffsetBytes) {
- if (OffsetBytes > 0) {
- auto *ByteType = Type::getInt8Ty(CI->getContext());
- Source = Builder.CreateGEP(
- ByteType, Builder.CreateBitCast(Source, ByteType->getPointerTo()),
- ConstantInt::get(ByteType, OffsetBytes));
- }
- return Builder.CreateBitCast(Source, LoadSizeType->getPointerTo());
-}
-
-// This function creates the IR instructions for loading and comparing 1 byte.
-// It loads 1 byte from each source of the memcmp parameters at the given
-// OffsetBytes. It then subtracts the two loaded values and adds this result to the
-// final phi node for selecting the memcmp result.
-void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
- unsigned OffsetBytes) {
- BasicBlock *const BB = LoadCmpBlocks[BlockIndex];
- Builder.SetInsertPoint(BB);
- Type *LoadSizeType = Type::getInt8Ty(CI->getContext());
- Value *Source1 =
- getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType, OffsetBytes);
- Value *Source2 =
- getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType, OffsetBytes);
-
- Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
- Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
-
- LoadSrc1 = Builder.CreateZExt(LoadSrc1, Type::getInt32Ty(CI->getContext()));
- LoadSrc2 = Builder.CreateZExt(LoadSrc2, Type::getInt32Ty(CI->getContext()));
- Value *Diff = Builder.CreateSub(LoadSrc1, LoadSrc2);
-
- PhiRes->addIncoming(Diff, LoadCmpBlocks[BlockIndex]);
-
- if (BlockIndex < (LoadCmpBlocks.size() - 1)) {
- // Early exit branch if difference found to EndBlock. Otherwise, continue to
- // next LoadCmpBlock.
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff,
- ConstantInt::get(Diff->getType(), 0));
- BasicBlock *const NextBB = LoadCmpBlocks[BlockIndex + 1];
- BranchInst *CmpBr = BranchInst::Create(EndBlock, NextBB, Cmp);
- Builder.Insert(CmpBr);
- DTU.applyUpdates({{DominatorTree::Insert, BB, EndBlock},
- {DominatorTree::Insert, BB, NextBB}});
- } else {
- // The last block has an unconditional branch to EndBlock.
- BranchInst *CmpBr = BranchInst::Create(EndBlock);
- Builder.Insert(CmpBr);
- DTU.applyUpdates({{DominatorTree::Insert, BB, EndBlock}});
- }
-}
-
-/// Generate an equality comparison for one or more pairs of loaded values.
-/// This is used in the case where the memcmp() call is compared equal or not
-/// equal to zero.
-Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex,
- unsigned &LoadIndex) {
- assert(LoadIndex < getNumLoads() &&
- "getCompareLoadPairs() called with no remaining loads");
- std::vector<Value *> XorList, OrList;
- Value *Diff = nullptr;
-
- const unsigned NumLoads =
- std::min(getNumLoads() - LoadIndex, NumLoadsPerBlockForZeroCmp);
-
- // For a single-block expansion, start inserting before the memcmp call.
- if (LoadCmpBlocks.empty())
- Builder.SetInsertPoint(CI);
- else
- Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
-
- Value *Cmp = nullptr;
- // If we have multiple loads per block, we need to generate a composite
- // comparison using xor+or. The type for the combinations is the largest load
- // type.
- IntegerType *const MaxLoadType =
- NumLoads == 1 ? nullptr
- : IntegerType::get(CI->getContext(), MaxLoadSize * 8);
- for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) {
- const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex];
-
- IntegerType *LoadSizeType =
- IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
-
- Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
- CurLoadEntry.Offset);
- Value *Source2 = getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType,
- CurLoadEntry.Offset);
-
- // Get a constant or load a value for each source address.
- Value *LoadSrc1 = nullptr;
- if (auto *Source1C = dyn_cast<Constant>(Source1))
- LoadSrc1 = ConstantFoldLoadFromConstPtr(Source1C, LoadSizeType, DL);
- if (!LoadSrc1)
- LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
-
- Value *LoadSrc2 = nullptr;
- if (auto *Source2C = dyn_cast<Constant>(Source2))
- LoadSrc2 = ConstantFoldLoadFromConstPtr(Source2C, LoadSizeType, DL);
- if (!LoadSrc2)
- LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
-
- if (NumLoads != 1) {
- if (LoadSizeType != MaxLoadType) {
- LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType);
- LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType);
- }
- // If we have multiple loads per block, we need to generate a composite
- // comparison using xor+or.
- Diff = Builder.CreateXor(LoadSrc1, LoadSrc2);
- Diff = Builder.CreateZExt(Diff, MaxLoadType);
- XorList.push_back(Diff);
- } else {
- // If there's only one load per block, we just compare the loaded values.
- Cmp = Builder.CreateICmpNE(LoadSrc1, LoadSrc2);
- }
- }
-
- auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> {
- std::vector<Value *> OutList;
- for (unsigned i = 0; i < InList.size() - 1; i = i + 2) {
- Value *Or = Builder.CreateOr(InList[i], InList[i + 1]);
- OutList.push_back(Or);
- }
- if (InList.size() % 2 != 0)
- OutList.push_back(InList.back());
- return OutList;
- };
-
- if (!Cmp) {
- // Pairwise OR the XOR results.
- OrList = pairWiseOr(XorList);
-
- // Pairwise OR the OR results until one result is left.
- while (OrList.size() != 1) {
- OrList = pairWiseOr(OrList);
- }
-
- assert(Diff && "Failed to find comparison diff");
- Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0));
- }
-
- return Cmp;
-}
-
-void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
- unsigned &LoadIndex) {
- Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex);
-
- BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
- ? EndBlock
- : LoadCmpBlocks[BlockIndex + 1];
- // Early exit branch to ResultBlock if a difference is found. Otherwise,
- // continue to the next LoadCmpBlock or EndBlock.
- BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp);
- Builder.Insert(CmpBr);
- BasicBlock *const BB = LoadCmpBlocks[BlockIndex];
-
- // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
- // since early exit to ResultBlock was not taken (no difference was found in
- // any of the bytes).
- if (BlockIndex == LoadCmpBlocks.size() - 1) {
- Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
- PhiRes->addIncoming(Zero, BB);
- }
- DTU.applyUpdates({{DominatorTree::Insert, BB, ResBlock.BB},
- {DominatorTree::Insert, BB, NextBB}});
-}
-
-// This function creates the IR instructions for loading and comparing using the
-// given LoadSize. It loads the number of bytes specified by LoadSize from each
-// source of the memcmp parameters. It then does a subtract to see if there was
-// a difference in the loaded values. If a difference is found, it branches
-// with an early exit to the ResultBlock for calculating which source was
-// larger. Otherwise, it falls through to either the next LoadCmpBlock or the
-// EndBlock if this is the last LoadCmpBlock. Loading 1 byte is handled with a
-// special case through emitLoadCompareByteBlock. The special handling can
-// simply subtract the loaded values and add the result to the result phi node.
-void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
- // There is one load per block in this case, BlockIndex == LoadIndex.
- const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];
-
- if (CurLoadEntry.LoadSize == 1) {
- MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, CurLoadEntry.Offset);
- return;
- }
-
- Type *LoadSizeType =
- IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
- Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
- assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");
-
- BasicBlock *const BB = LoadCmpBlocks[BlockIndex];
- Builder.SetInsertPoint(BB);
-
- Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
- CurLoadEntry.Offset);
- Value *Source2 = getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType,
- CurLoadEntry.Offset);
-
- // Load LoadSizeType from the base address.
- Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
- Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
-
- if (DL.isLittleEndian()) {
- Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
- Intrinsic::bswap, LoadSizeType);
- LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1);
- LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2);
- }
-
- if (LoadSizeType != MaxLoadType) {
- LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType);
- LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType);
- }
-
- // Add the loaded values to the phi nodes for calculating the memcmp result
- // only if the result is not used in a zero equality.
- if (!IsUsedForZeroCmp) {
- ResBlock.PhiSrc1->addIncoming(LoadSrc1, LoadCmpBlocks[BlockIndex]);
- ResBlock.PhiSrc2->addIncoming(LoadSrc2, LoadCmpBlocks[BlockIndex]);
- }
-
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, LoadSrc1, LoadSrc2);
- BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
- ? EndBlock
- : LoadCmpBlocks[BlockIndex + 1];
- // Early exit branch to ResultBlock if a difference is found. Otherwise,
- // continue to the next LoadCmpBlock or EndBlock.
- BranchInst *CmpBr = BranchInst::Create(NextBB, ResBlock.BB, Cmp);
- Builder.Insert(CmpBr);
-
- // Add a phi edge for the last LoadCmpBlock to EndBlock with a value of 0
- // since early exit to ResultBlock was not taken (no difference was found in
- // any of the bytes).
- if (BlockIndex == LoadCmpBlocks.size() - 1) {
- Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
- PhiRes->addIncoming(Zero, BB);
- }
- DTU.applyUpdates({{DominatorTree::Insert, BB, ResBlock.BB},
- {DominatorTree::Insert, BB, NextBB}});
-}
-
-// This function populates the ResultBlock with a sequence to calculate the
-// memcmp result. It compares the two loaded source values and returns -1 if
-// src1 < src2 and 1 if src1 > src2.
-void MemCmpExpansion::emitMemCmpResultBlock() {
- // Special case: if the memcmp result is used in a zero equality, the result
- // does not need to be calculated; we can simply return 1.
- if (IsUsedForZeroCmp) {
- BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
- Builder.SetInsertPoint(ResBlock.BB, InsertPt);
- Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
- PhiRes->addIncoming(Res, ResBlock.BB);
- BranchInst *NewBr = BranchInst::Create(EndBlock);
- Builder.Insert(NewBr);
- DTU.applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
- return;
- }
- BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
- Builder.SetInsertPoint(ResBlock.BB, InsertPt);
-
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
- ResBlock.PhiSrc2);
-
- Value *Res =
- Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1),
- ConstantInt::get(Builder.getInt32Ty(), 1));
-
- BranchInst *NewBr = BranchInst::Create(EndBlock);
- Builder.Insert(NewBr);
- PhiRes->addIncoming(Res, ResBlock.BB);
- DTU.applyUpdates({{DominatorTree::Insert, ResBlock.BB, EndBlock}});
-}
-
-void MemCmpExpansion::setupResultBlockPHINodes() {
- Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
- Builder.SetInsertPoint(ResBlock.BB);
- // Note: this assumes one load per block.
- ResBlock.PhiSrc1 =
- Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1");
- ResBlock.PhiSrc2 =
- Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2");
-}
-
-void MemCmpExpansion::setupEndBlockPHINodes() {
- Builder.SetInsertPoint(&EndBlock->front());
- PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
-}
-
-Value *MemCmpExpansion::getMemCmpExpansionZeroCase() {
- unsigned LoadIndex = 0;
- // This loop populates each of the LoadCmpBlocks with the IR sequence to
- // handle multiple loads per block.
- for (unsigned I = 0; I < getNumBlocks(); ++I) {
- emitLoadCompareBlockMultipleLoads(I, LoadIndex);
- }
-
- emitMemCmpResultBlock();
- return PhiRes;
-}
-
-/// A memcmp expansion that compares equality with 0 and only has one block of
-/// load and compare can bypass the compare, branch, and phi IR that is required
-/// in the general case.
-Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() {
- unsigned LoadIndex = 0;
- Value *Cmp = getCompareLoadPairs(0, LoadIndex);
- assert(LoadIndex == getNumLoads() && "some entries were not consumed");
- return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext()));
-}
-
-/// A memcmp expansion that only has one block of load and compare can bypass
-/// the compare, branch, and phi IR that is required in the general case.
-Value *MemCmpExpansion::getMemCmpOneBlock() {
- Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8);
- Value *Source1 = CI->getArgOperand(0);
- Value *Source2 = CI->getArgOperand(1);
-
- // Cast source to LoadSizeType*.
- if (Source1->getType() != LoadSizeType)
- Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo());
- if (Source2->getType() != LoadSizeType)
- Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo());
-
- // Load LoadSizeType from the base address.
- Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
- Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
-
- if (DL.isLittleEndian() && Size != 1) {
- Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
- Intrinsic::bswap, LoadSizeType);
- LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1);
- LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2);
- }
-
- if (Size < 4) {
- // The i8 and i16 cases don't need compares. We zext the loaded values and
- // subtract them to get the suitable negative, zero, or positive i32 result.
- LoadSrc1 = Builder.CreateZExt(LoadSrc1, Builder.getInt32Ty());
- LoadSrc2 = Builder.CreateZExt(LoadSrc2, Builder.getInt32Ty());
- return Builder.CreateSub(LoadSrc1, LoadSrc2);
- }
-
- // The result of memcmp is negative, zero, or positive, so produce that by
- // subtracting 2 extended compare bits: sub (ugt, ult).
- // If a target prefers to use selects to get -1/0/1, they should be able
- // to transform this later. The inverse transform (going from selects to math)
- // may not be possible in the DAG because the selects got converted into
- // branches before we got there.
- Value *CmpUGT = Builder.CreateICmpUGT(LoadSrc1, LoadSrc2);
- Value *CmpULT = Builder.CreateICmpULT(LoadSrc1, LoadSrc2);
- Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty());
- Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty());
- return Builder.CreateSub(ZextUGT, ZextULT);
-}
-
-// This function expands the memcmp call into an inline expansion and returns
-// the memcmp result.
-Value *MemCmpExpansion::getMemCmpExpansion() {
- // Create the basic block framework for a multi-block expansion.
- if (getNumBlocks() != 1) {
- BasicBlock *StartBlock = CI->getParent();
- EndBlock = StartBlock->splitBasicBlock(CI, "endblock");
- DTU.applyUpdates({{DominatorTree::Insert, StartBlock, EndBlock}});
- setupEndBlockPHINodes();
- createResultBlock();
-
- // If the return value of memcmp is not used in a zero equality, we need to
- // calculate which source was larger. The calculation requires the
- // two loaded source values of each load compare block.
- // These will be saved in the phi nodes created by setupResultBlockPHINodes.
- if (!IsUsedForZeroCmp)
- setupResultBlockPHINodes();
-
- // Create the number of required load compare basic blocks.
- createLoadCmpBlocks();
-
- // Update the terminator added by splitBasicBlock to branch to the first
- // LoadCmpBlock.
- BasicBlock *const FirstLoadBB = LoadCmpBlocks[0];
- StartBlock->getTerminator()->setSuccessor(0, FirstLoadBB);
- DTU.applyUpdates({{DominatorTree::Delete, StartBlock, EndBlock},
- {DominatorTree::Insert, StartBlock, FirstLoadBB}});
- }
-
- Builder.SetCurrentDebugLocation(CI->getDebugLoc());
-
- if (IsUsedForZeroCmp)
- return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock()
- : getMemCmpExpansionZeroCase();
-
- if (getNumBlocks() == 1)
- return getMemCmpOneBlock();
-
- for (unsigned I = 0; I < getNumBlocks(); ++I) {
- emitLoadCompareBlock(I);
- }
-
- emitMemCmpResultBlock();
- return PhiRes;
-}
-
-// This function checks to see if an expansion of memcmp can be generated.
-// It checks for a constant compare size that is less than the max inline size.
-// If an expansion cannot occur, it returns false so the call is left as a
-// library call. Otherwise, the library call is replaced with a new IR
-// instruction sequence.
-/// We want to transform:
-/// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15)
-/// To:
-/// loadbb:
-/// %0 = bitcast i32* %buffer2 to i8*
-/// %1 = bitcast i32* %buffer1 to i8*
-/// %2 = bitcast i8* %1 to i64*
-/// %3 = bitcast i8* %0 to i64*
-/// %4 = load i64, i64* %2
-/// %5 = load i64, i64* %3
-/// %6 = call i64 @llvm.bswap.i64(i64 %4)
-/// %7 = call i64 @llvm.bswap.i64(i64 %5)
-/// %8 = sub i64 %6, %7
-/// %9 = icmp ne i64 %8, 0
-/// br i1 %9, label %res_block, label %loadbb1
-/// res_block: ; preds = %loadbb2,
-/// %loadbb1, %loadbb
-/// %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ]
-/// %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ]
-/// %10 = icmp ult i64 %phi.src1, %phi.src2
-/// %11 = select i1 %10, i32 -1, i32 1
-/// br label %endblock
-/// loadbb1: ; preds = %loadbb
-/// %12 = bitcast i32* %buffer2 to i8*
-/// %13 = bitcast i32* %buffer1 to i8*
-/// %14 = bitcast i8* %13 to i32*
-/// %15 = bitcast i8* %12 to i32*
-/// %16 = getelementptr i32, i32* %14, i32 2
-/// %17 = getelementptr i32, i32* %15, i32 2
-/// %18 = load i32, i32* %16
-/// %19 = load i32, i32* %17
-/// %20 = call i32 @llvm.bswap.i32(i32 %18)
-/// %21 = call i32 @llvm.bswap.i32(i32 %19)
-/// %22 = zext i32 %20 to i64
-/// %23 = zext i32 %21 to i64
-/// %24 = sub i64 %22, %23
-/// %25 = icmp ne i64 %24, 0
-/// br i1 %25, label %res_block, label %loadbb2
-/// loadbb2: ; preds = %loadbb1
-/// %26 = bitcast i32* %buffer2 to i8*
-/// %27 = bitcast i32* %buffer1 to i8*
-/// %28 = bitcast i8* %27 to i16*
-/// %29 = bitcast i8* %26 to i16*
-/// %30 = getelementptr i16, i16* %28, i16 6
-/// %31 = getelementptr i16, i16* %29, i16 6
-/// %32 = load i16, i16* %30
-/// %33 = load i16, i16* %31
-/// %34 = call i16 @llvm.bswap.i16(i16 %32)
-/// %35 = call i16 @llvm.bswap.i16(i16 %33)
-/// %36 = zext i16 %34 to i64
-/// %37 = zext i16 %35 to i64
-/// %38 = sub i64 %36, %37
-/// %39 = icmp ne i64 %38, 0
-/// br i1 %39, label %res_block, label %loadbb3
-/// loadbb3: ; preds = %loadbb2
-/// %40 = bitcast i32* %buffer2 to i8*
-/// %41 = bitcast i32* %buffer1 to i8*
-/// %42 = getelementptr i8, i8* %41, i8 14
-/// %43 = getelementptr i8, i8* %40, i8 14
-/// %44 = load i8, i8* %42
-/// %45 = load i8, i8* %43
-/// %46 = zext i8 %44 to i32
-/// %47 = zext i8 %45 to i32
-/// %48 = sub i32 %46, %47
-/// br label %endblock
-/// endblock: ; preds = %res_block,
-/// %loadbb3
-/// %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
-/// ret i32 %phi.res
-static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
- const DataLayout *DL, DominatorTree *DT) {
- NumMemCmpCalls++;
-
- // Early exit from expansion if -Oz.
- if (CI->getFunction()->hasMinSize())
- return false;
-
- // Early exit from expansion if size is not a constant.
- ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2));
- if (!SizeCast) {
- NumMemCmpNotConstant++;
- return false;
- }
- const uint64_t SizeVal = SizeCast->getZExtValue();
-
- if (SizeVal == 0) {
- return false;
- }
- // TTI call to check if target would like to expand memcmp. Also, get the
- // available load sizes.
- const bool IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI);
- auto Options = TTI->enableMemCmpExpansion(CI->getFunction()->hasOptSize(),
- IsUsedForZeroCmp);
- if (!Options)
- return false;
-
- if (MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences())
- Options.NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock;
-
- if (CI->getFunction()->hasOptSize() &&
- MaxLoadsPerMemcmpOptSize.getNumOccurrences())
- Options.MaxNumLoads = MaxLoadsPerMemcmpOptSize;
-
- if (!CI->getFunction()->hasOptSize() && MaxLoadsPerMemcmp.getNumOccurrences())
- Options.MaxNumLoads = MaxLoadsPerMemcmp;
-
- MemCmpExpansion Expansion(CI, SizeVal, Options, IsUsedForZeroCmp, *DL, DT);
-
- // Don't expand if this will require more loads than desired by the target.
- if (Expansion.getNumLoads() == 0) {
- NumMemCmpGreaterThanMax++;
- return false;
- }
-
- NumMemCmpInlined++;
-
- Value *Res = Expansion.getMemCmpExpansion();
-
- // Replace call with result of expansion and erase call.
- CI->replaceAllUsesWith(Res);
- CI->eraseFromParent();
-
- return true;
-}
-
-class ExpandMemCmpPass : public FunctionPass {
-public:
- static char ID;
-
- ExpandMemCmpPass() : FunctionPass(ID) {
- initializeExpandMemCmpPassPass(*PassRegistry::getPassRegistry());
- }
-
- bool runOnFunction(Function &F) override {
- if (skipFunction(F))
- return false;
-
- const TargetLibraryInfo *TLI =
- &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
- const TargetTransformInfo *TTI =
- &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
- // ExpandMemCmp does not need the DominatorTree, but we update it if it's
- // already available.
- auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
- auto PA = runImpl(F, TLI, TTI, DTWP ? &DTWP->getDomTree() : nullptr);
- return !PA.areAllPreserved();
- }
-
-private:
- void getAnalysisUsage(AnalysisUsage &AU) const override {
- AU.addRequired<TargetLibraryInfoWrapperPass>();
- AU.addRequired<TargetTransformInfoWrapperPass>();
- AU.addUsedIfAvailable<DominatorTreeWrapperPass>();
- AU.addPreserved<GlobalsAAWrapperPass>();
- AU.addPreserved<DominatorTreeWrapperPass>();
- FunctionPass::getAnalysisUsage(AU);
- }
-
- PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI, DominatorTree *DT);
- // Returns true if a change was made.
- bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI, const DataLayout &DL,
- DominatorTree *DT);
-};
-
-bool ExpandMemCmpPass::runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI,
- const DataLayout &DL, DominatorTree *DT) {
- for (Instruction &I : BB) {
- CallInst *CI = dyn_cast<CallInst>(&I);
- if (!CI) {
- continue;
- }
- LibFunc Func;
- if (TLI->getLibFunc(ImmutableCallSite(CI), Func) &&
- (Func == LibFunc_memcmp || Func == LibFunc_bcmp) &&
- expandMemCmp(CI, TTI, &DL, DT)) {
- return true;
- }
- }
- return false;
-}
-
-PreservedAnalyses ExpandMemCmpPass::runImpl(Function &F,
- const TargetLibraryInfo *TLI,
- const TargetTransformInfo *TTI,
- DominatorTree *DT) {
- const DataLayout &DL = F.getParent()->getDataLayout();
- bool MadeChanges = false;
- for (auto BBIt = F.begin(); BBIt != F.end();) {
- if (runOnBlock(*BBIt, TLI, TTI, DL, DT)) {
- MadeChanges = true;
- // If changes were made, restart the function from the beginning, since
- // the structure of the function was changed.
- BBIt = F.begin();
- } else {
- ++BBIt;
- }
- }
- if (!MadeChanges)
- return PreservedAnalyses::all();
- PreservedAnalyses PA;
- PA.preserve<GlobalsAA>();
- PA.preserve<DominatorTreeAnalysis>();
- return PA;
-}
-
-} // namespace
-
-char ExpandMemCmpPass::ID = 0;
-INITIALIZE_PASS_BEGIN(ExpandMemCmpPass, "expandmemcmp",
- "Expand memcmp() to load/stores", false, false)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
-INITIALIZE_PASS_END(ExpandMemCmpPass, "expandmemcmp",
- "Expand memcmp() to load/stores", false, false)
-
-Pass *llvm::createExpandMemCmpPass() { return new ExpandMemCmpPass(); }
Modified: llvm/trunk/lib/Transforms/Scalar/MergeICmps.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/MergeICmps.cpp?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/MergeICmps.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/MergeICmps.cpp Wed Jun 26 05:13:13 2019
@@ -866,7 +866,7 @@ static bool runImpl(Function &F, const T
// We only try merging comparisons if the target wants to expand memcmp later.
// The rationale is to avoid turning small chains into memcmp calls.
- if (!TTI.enableMemCmpExpansion(F.hasOptSize(), /*IsZeroCmp*/ true))
+ if (!TTI.enableMemCmpExpansion(F.hasOptSize(), true))
return false;
// If we don't have memcmp available, we can't emit calls to it.
Modified: llvm/trunk/lib/Transforms/Scalar/Scalar.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/Scalar.cpp?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/Scalar.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/Scalar.cpp Wed Jun 26 05:13:13 2019
@@ -84,7 +84,6 @@ void llvm::initializeScalarOpts(PassRegi
initializeLowerWidenableConditionLegacyPassPass(Registry);
initializeMemCpyOptLegacyPassPass(Registry);
initializeMergeICmpsLegacyPassPass(Registry);
- initializeExpandMemCmpPassPass(Registry);
initializeMergedLoadStoreMotionLegacyPassPass(Registry);
initializeNaryReassociateLegacyPassPass(Registry);
initializePartiallyInlineLibCallsLegacyPassPass(Registry);
Modified: llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll Wed Jun 26 05:13:13 2019
@@ -32,6 +32,10 @@
; CHECK-NEXT: Loop Pass Manager
; CHECK-NEXT: Induction Variable Users
; CHECK-NEXT: Loop Strength Reduction
+; CHECK-NEXT: Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT: Function Alias Analysis Results
+; CHECK-NEXT: Merge contiguous icmps into a memcmp
+; CHECK-NEXT: Expand memcmp() to load/stores
; CHECK-NEXT: Lower Garbage Collection Instructions
; CHECK-NEXT: Shadow Stack GC Lowering
; CHECK-NEXT: Remove unreachable blocks from the CFG
Modified: llvm/trunk/test/CodeGen/ARM/O3-pipeline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/O3-pipeline.ll?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/O3-pipeline.ll (original)
+++ llvm/trunk/test/CodeGen/ARM/O3-pipeline.ll Wed Jun 26 05:13:13 2019
@@ -16,6 +16,10 @@
; CHECK-NEXT: Loop Pass Manager
; CHECK-NEXT: Induction Variable Users
; CHECK-NEXT: Loop Strength Reduction
+; CHECK-NEXT: Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT: Function Alias Analysis Results
+; CHECK-NEXT: Merge contiguous icmps into a memcmp
+; CHECK-NEXT: Expand memcmp() to load/stores
; CHECK-NEXT: Lower Garbage Collection Instructions
; CHECK-NEXT: Shadow Stack GC Lowering
; CHECK-NEXT: Remove unreachable blocks from the CFG
Modified: llvm/trunk/test/CodeGen/Generic/llc-start-stop.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/llc-start-stop.ll?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/llc-start-stop.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/llc-start-stop.ll Wed Jun 26 05:13:13 2019
@@ -13,15 +13,15 @@
; STOP-BEFORE-NOT: Loop Strength Reduction
; RUN: llc < %s -debug-pass=Structure -start-after=loop-reduce -o /dev/null 2>&1 | FileCheck %s -check-prefix=START-AFTER
-; START-AFTER: -gc-lowering
+; START-AFTER: -aa -mergeicmps
; START-AFTER: FunctionPass Manager
-; START-AFTER-NEXT: Lower Garbage Collection Instructions
+; START-AFTER-NEXT: Dominator Tree Construction
; RUN: llc < %s -debug-pass=Structure -start-before=loop-reduce -o /dev/null 2>&1 | FileCheck %s -check-prefix=START-BEFORE
; START-BEFORE: -machine-branch-prob -domtree
; START-BEFORE: FunctionPass Manager
; START-BEFORE: Loop Strength Reduction
-; START-BEFORE-NEXT: Lower Garbage Collection Instructions
+; START-BEFORE-NEXT: Basic Alias Analysis (stateless AA impl)
; RUN: not llc < %s -start-before=nonexistent -o /dev/null 2>&1 | FileCheck %s -check-prefix=NONEXISTENT-START-BEFORE
; RUN: not llc < %s -stop-before=nonexistent -o /dev/null 2>&1 | FileCheck %s -check-prefix=NONEXISTENT-STOP-BEFORE
Added: llvm/trunk/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll?rev=364416&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll (added)
+++ llvm/trunk/test/CodeGen/PowerPC/memCmpUsedInZeroEqualityComparison.ll Wed Jun 26 05:13:13 2019
@@ -0,0 +1,220 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -ppc-gpr-icmps=all -verify-machineinstrs -mcpu=pwr8 < %s | FileCheck %s
+target datalayout = "e-m:e-i64:64-n32:64"
+target triple = "powerpc64le-unknown-linux-gnu"
+
+@zeroEqualityTest01.buffer1 = private unnamed_addr constant [3 x i32] [i32 1, i32 2, i32 4], align 4
+@zeroEqualityTest01.buffer2 = private unnamed_addr constant [3 x i32] [i32 1, i32 2, i32 3], align 4
+@zeroEqualityTest02.buffer1 = private unnamed_addr constant [4 x i32] [i32 4, i32 0, i32 0, i32 0], align 4
+@zeroEqualityTest02.buffer2 = private unnamed_addr constant [4 x i32] [i32 3, i32 0, i32 0, i32 0], align 4
+@zeroEqualityTest03.buffer1 = private unnamed_addr constant [4 x i32] [i32 0, i32 0, i32 0, i32 3], align 4
+@zeroEqualityTest03.buffer2 = private unnamed_addr constant [4 x i32] [i32 0, i32 0, i32 0, i32 4], align 4
+@zeroEqualityTest04.buffer1 = private unnamed_addr constant [15 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14], align 4
+@zeroEqualityTest04.buffer2 = private unnamed_addr constant [15 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 13], align 4
+
+declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1
+
+; Check 4 bytes - requires 1 load for each param.
+define signext i32 @zeroEqualityTest02(i8* %x, i8* %y) {
+; CHECK-LABEL: zeroEqualityTest02:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lwz 3, 0(3)
+; CHECK-NEXT: lwz 4, 0(4)
+; CHECK-NEXT: xor 3, 3, 4
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: srwi 3, 3, 5
+; CHECK-NEXT: xori 3, 3, 1
+; CHECK-NEXT: blr
+ %call = tail call signext i32 @memcmp(i8* %x, i8* %y, i64 4)
+ %not.cmp = icmp ne i32 %call, 0
+ %. = zext i1 %not.cmp to i32
+ ret i32 %.
+}
+
+; Check 16 bytes - requires 2 loads for each param (or use vectors?).
+define signext i32 @zeroEqualityTest01(i8* %x, i8* %y) {
+; CHECK-LABEL: zeroEqualityTest01:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld 5, 0(3)
+; CHECK-NEXT: ld 6, 0(4)
+; CHECK-NEXT: cmpld 5, 6
+; CHECK-NEXT: bne 0, .LBB1_2
+; CHECK-NEXT: # %bb.1: # %loadbb1
+; CHECK-NEXT: ld 3, 8(3)
+; CHECK-NEXT: ld 4, 8(4)
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: beq 0, .LBB1_3
+; CHECK-NEXT: .LBB1_2: # %res_block
+; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: .LBB1_3: # %endblock
+; CHECK-NEXT: clrldi 3, 3, 32
+; CHECK-NEXT: blr
+ %call = tail call signext i32 @memcmp(i8* %x, i8* %y, i64 16)
+ %not.tobool = icmp ne i32 %call, 0
+ %. = zext i1 %not.tobool to i32
+ ret i32 %.
+}
+
+; Check 7 bytes - requires 3 loads for each param.
+define signext i32 @zeroEqualityTest03(i8* %x, i8* %y) {
+; CHECK-LABEL: zeroEqualityTest03:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lwz 5, 0(3)
+; CHECK-NEXT: lwz 6, 0(4)
+; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: bne 0, .LBB2_3
+; CHECK-NEXT: # %bb.1: # %loadbb1
+; CHECK-NEXT: lhz 5, 4(3)
+; CHECK-NEXT: lhz 6, 4(4)
+; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: bne 0, .LBB2_3
+; CHECK-NEXT: # %bb.2: # %loadbb2
+; CHECK-NEXT: lbz 3, 6(3)
+; CHECK-NEXT: lbz 4, 6(4)
+; CHECK-NEXT: cmplw 3, 4
+; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: beq 0, .LBB2_4
+; CHECK-NEXT: .LBB2_3: # %res_block
+; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: .LBB2_4: # %endblock
+; CHECK-NEXT: clrldi 3, 3, 32
+; CHECK-NEXT: blr
+ %call = tail call signext i32 @memcmp(i8* %x, i8* %y, i64 7)
+ %not.lnot = icmp ne i32 %call, 0
+ %cond = zext i1 %not.lnot to i32
+ ret i32 %cond
+}
+
+; Validate with > 0
+define signext i32 @zeroEqualityTest04() {
+; CHECK-LABEL: zeroEqualityTest04:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addis 3, 2, .LzeroEqualityTest02.buffer1@toc@ha
+; CHECK-NEXT: addis 4, 2, .LzeroEqualityTest02.buffer2@toc@ha
+; CHECK-NEXT: addi 6, 3, .LzeroEqualityTest02.buffer1@toc@l
+; CHECK-NEXT: addi 5, 4, .LzeroEqualityTest02.buffer2@toc@l
+; CHECK-NEXT: ldbrx 3, 0, 6
+; CHECK-NEXT: ldbrx 4, 0, 5
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: bne 0, .LBB3_2
+; CHECK-NEXT: # %bb.1: # %loadbb1
+; CHECK-NEXT: li 4, 8
+; CHECK-NEXT: ldbrx 3, 6, 4
+; CHECK-NEXT: ldbrx 4, 5, 4
+; CHECK-NEXT: li 5, 0
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: beq 0, .LBB3_3
+; CHECK-NEXT: .LBB3_2: # %res_block
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: li 4, -1
+; CHECK-NEXT: isel 5, 4, 3, 0
+; CHECK-NEXT: .LBB3_3: # %endblock
+; CHECK-NEXT: extsw 3, 5
+; CHECK-NEXT: neg 3, 3
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: xori 3, 3, 1
+; CHECK-NEXT: blr
+ %call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest02.buffer2 to i8*), i64 16)
+ %not.cmp = icmp slt i32 %call, 1
+ %. = zext i1 %not.cmp to i32
+ ret i32 %.
+}
+
+; Validate with < 0
+define signext i32 @zeroEqualityTest05() {
+; CHECK-LABEL: zeroEqualityTest05:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addis 3, 2, .LzeroEqualityTest03.buffer1@toc@ha
+; CHECK-NEXT: addis 4, 2, .LzeroEqualityTest03.buffer2@toc@ha
+; CHECK-NEXT: addi 6, 3, .LzeroEqualityTest03.buffer1@toc@l
+; CHECK-NEXT: addi 5, 4, .LzeroEqualityTest03.buffer2@toc@l
+; CHECK-NEXT: ldbrx 3, 0, 6
+; CHECK-NEXT: ldbrx 4, 0, 5
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: bne 0, .LBB4_2
+; CHECK-NEXT: # %bb.1: # %loadbb1
+; CHECK-NEXT: li 4, 8
+; CHECK-NEXT: ldbrx 3, 6, 4
+; CHECK-NEXT: ldbrx 4, 5, 4
+; CHECK-NEXT: li 5, 0
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: beq 0, .LBB4_3
+; CHECK-NEXT: .LBB4_2: # %res_block
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: li 4, -1
+; CHECK-NEXT: isel 5, 4, 3, 0
+; CHECK-NEXT: .LBB4_3: # %endblock
+; CHECK-NEXT: nor 3, 5, 5
+; CHECK-NEXT: rlwinm 3, 3, 1, 31, 31
+; CHECK-NEXT: blr
+ %call = tail call signext i32 @memcmp(i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer1 to i8*), i8* bitcast ([4 x i32]* @zeroEqualityTest03.buffer2 to i8*), i64 16)
+ %call.lobit = lshr i32 %call, 31
+ %call.lobit.not = xor i32 %call.lobit, 1
+ ret i32 %call.lobit.not
+}
+
+; Validate with memcmp()?:
+define signext i32 @equalityFoldTwoConstants() {
+; CHECK-LABEL: equalityFoldTwoConstants:
+; CHECK: # %bb.0: # %loadbb
+; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: blr
+ %call = tail call signext i32 @memcmp(i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer1 to i8*), i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer2 to i8*), i64 16)
+ %not.tobool = icmp eq i32 %call, 0
+ %cond = zext i1 %not.tobool to i32
+ ret i32 %cond
+}
+
+define signext i32 @equalityFoldOneConstant(i8* %X) {
+; CHECK-LABEL: equalityFoldOneConstant:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ld 4, 0(3)
+; CHECK-NEXT: li 5, 1
+; CHECK-NEXT: sldi 5, 5, 32
+; CHECK-NEXT: cmpld 4, 5
+; CHECK-NEXT: bne 0, .LBB6_2
+; CHECK-NEXT: # %bb.1: # %loadbb1
+; CHECK-NEXT: li 4, 3
+; CHECK-NEXT: ld 3, 8(3)
+; CHECK-NEXT: sldi 4, 4, 32
+; CHECK-NEXT: ori 4, 4, 2
+; CHECK-NEXT: cmpld 3, 4
+; CHECK-NEXT: li 3, 0
+; CHECK-NEXT: beq 0, .LBB6_3
+; CHECK-NEXT: .LBB6_2: # %res_block
+; CHECK-NEXT: li 3, 1
+; CHECK-NEXT: .LBB6_3: # %endblock
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: srwi 3, 3, 5
+; CHECK-NEXT: blr
+ %call = tail call signext i32 @memcmp(i8* bitcast ([15 x i32]* @zeroEqualityTest04.buffer1 to i8*), i8* %X, i64 16)
+ %not.tobool = icmp eq i32 %call, 0
+ %cond = zext i1 %not.tobool to i32
+ ret i32 %cond
+}
+
+define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) {
+; CHECK-LABEL: length2_eq_nobuiltin_attr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: mflr 0
+; CHECK-NEXT: std 0, 16(1)
+; CHECK-NEXT: stdu 1, -32(1)
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset lr, 16
+; CHECK-NEXT: li 5, 2
+; CHECK-NEXT: bl memcmp
+; CHECK-NEXT: nop
+; CHECK-NEXT: cntlzw 3, 3
+; CHECK-NEXT: rlwinm 3, 3, 27, 31, 31
+; CHECK-NEXT: addi 1, 1, 32
+; CHECK-NEXT: ld 0, 16(1)
+; CHECK-NEXT: mtlr 0
+; CHECK-NEXT: blr
+ %m = tail call signext i32 @memcmp(i8* %X, i8* %Y, i64 2) nobuiltin
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
Added: llvm/trunk/test/CodeGen/PowerPC/memcmp-mergeexpand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/memcmp-mergeexpand.ll?rev=364416&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/memcmp-mergeexpand.ll (added)
+++ llvm/trunk/test/CodeGen/PowerPC/memcmp-mergeexpand.ll Wed Jun 26 05:13:13 2019
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-gnu-linux < %s | FileCheck %s -check-prefix=PPC64LE
+
+; This tests interaction between MergeICmp and ExpandMemCmp.
+
+%"struct.std::pair" = type { i32, i32 }
+
+define zeroext i1 @opeq1(
+; PPC64LE-LABEL: opeq1:
+; PPC64LE: # %bb.0: # %"entry+land.rhs.i"
+; PPC64LE-NEXT: ld 3, 0(3)
+; PPC64LE-NEXT: ld 4, 0(4)
+; PPC64LE-NEXT: xor 3, 3, 4
+; PPC64LE-NEXT: cntlzd 3, 3
+; PPC64LE-NEXT: rldicl 3, 3, 58, 63
+; PPC64LE-NEXT: blr
+ %"struct.std::pair"* nocapture readonly dereferenceable(8) %a,
+ %"struct.std::pair"* nocapture readonly dereferenceable(8) %b) local_unnamed_addr #0 {
+entry:
+ %first.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %a, i64 0, i32 0
+ %0 = load i32, i32* %first.i, align 4
+ %first1.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %b, i64 0, i32 0
+ %1 = load i32, i32* %first1.i, align 4
+ %cmp.i = icmp eq i32 %0, %1
+ br i1 %cmp.i, label %land.rhs.i, label %opeq1.exit
+
+land.rhs.i:
+ %second.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %a, i64 0, i32 1
+ %2 = load i32, i32* %second.i, align 4
+ %second2.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %b, i64 0, i32 1
+ %3 = load i32, i32* %second2.i, align 4
+ %cmp3.i = icmp eq i32 %2, %3
+ br label %opeq1.exit
+
+opeq1.exit:
+ %4 = phi i1 [ false, %entry ], [ %cmp3.i, %land.rhs.i ]
+ ret i1 %4
+}
+
+
Added: llvm/trunk/test/CodeGen/PowerPC/memcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/memcmp.ll?rev=364416&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/memcmp.ll (added)
+++ llvm/trunk/test/CodeGen/PowerPC/memcmp.ll Wed Jun 26 05:13:13 2019
@@ -0,0 +1,70 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le-unknown-gnu-linux < %s | FileCheck %s -check-prefix=CHECK
+
+define signext i32 @memcmp8(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ldbrx 3, 0, 3
+; CHECK-NEXT: ldbrx 4, 0, 4
+; CHECK-NEXT: subfc 5, 3, 4
+; CHECK-NEXT: subfe 5, 4, 4
+; CHECK-NEXT: subfc 4, 4, 3
+; CHECK-NEXT: subfe 3, 3, 3
+; CHECK-NEXT: neg 4, 5
+; CHECK-NEXT: neg 3, 3
+; CHECK-NEXT: subf 3, 3, 4
+; CHECK-NEXT: extsw 3, 3
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 8)
+ ret i32 %call
+}
+
+define signext i32 @memcmp4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp4:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lwbrx 3, 0, 3
+; CHECK-NEXT: lwbrx 4, 0, 4
+; CHECK-NEXT: sub 5, 4, 3
+; CHECK-NEXT: sub 3, 3, 4
+; CHECK-NEXT: rldicl 4, 5, 1, 63
+; CHECK-NEXT: rldicl 3, 3, 1, 63
+; CHECK-NEXT: subf 3, 3, 4
+; CHECK-NEXT: extsw 3, 3
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 4)
+ ret i32 %call
+}
+
+define signext i32 @memcmp2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp2:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lhbrx 3, 0, 3
+; CHECK-NEXT: lhbrx 4, 0, 4
+; CHECK-NEXT: subf 3, 4, 3
+; CHECK-NEXT: extsw 3, 3
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 2)
+ ret i32 %call
+}
+
+define signext i32 @memcmp1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+; CHECK-LABEL: memcmp1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lbz 3, 0(3)
+; CHECK-NEXT: lbz 4, 0(4)
+; CHECK-NEXT: subf 3, 4, 3
+; CHECK-NEXT: extsw 3, 3
+; CHECK-NEXT: blr
+ %t0 = bitcast i32* %buffer1 to i8*
+ %t1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %t0, i8* %t1, i64 1) #2
+ ret i32 %call
+}
+
+declare signext i32 @memcmp(i8*, i8*, i64)
Added: llvm/trunk/test/CodeGen/PowerPC/memcmpIR.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/memcmpIR.ll?rev=364416&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/memcmpIR.ll (added)
+++ llvm/trunk/test/CodeGen/PowerPC/memcmpIR.ll Wed Jun 26 05:13:13 2019
@@ -0,0 +1,192 @@
+; RUN: llc -o - -mtriple=powerpc64le-unknown-gnu-linux -stop-after codegenprepare %s | FileCheck %s
+; RUN: llc -o - -mtriple=powerpc64-unknown-gnu-linux -stop-after codegenprepare %s | FileCheck %s --check-prefix=CHECK-BE
+
+define signext i32 @test1(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+entry:
+ ; CHECK-LABEL: @test1(
+ ; CHECK: [[LOAD1:%[0-9]+]] = load i64, i64*
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
+ ; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
+ ; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
+
+ ; CHECK-LABEL: res_block:{{.*}}
+ ; CHECK: [[ICMP2:%[0-9]+]] = icmp ult i64
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
+ ; CHECK-NEXT: br label %endblock
+
+ ; CHECK-LABEL: loadbb1:{{.*}}
+ ; CHECK: [[BCC1:%[0-9]+]] = bitcast i32* {{.*}} to i8*
+ ; CHECK-NEXT: [[BCC2:%[0-9]+]] = bitcast i32* {{.*}} to i8*
+ ; CHECK-NEXT: [[GEP1:%[0-9]+]] = getelementptr i8, i8* [[BCC2]], i8 8
+ ; CHECK-NEXT: [[BCL1:%[0-9]+]] = bitcast i8* [[GEP1]] to i64*
+ ; CHECK-NEXT: [[GEP2:%[0-9]+]] = getelementptr i8, i8* [[BCC1]], i8 8
+ ; CHECK-NEXT: [[BCL2:%[0-9]+]] = bitcast i8* [[GEP2]] to i64*
+ ; CHECK-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[BCL1]]
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[BCL2]]
+ ; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
+ ; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %endblock, label %res_block
+
+ ; CHECK-BE-LABEL: @test1(
+ ; CHECK-BE: [[LOAD1:%[0-9]+]] = load i64, i64*
+ ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
+
+ ; CHECK-BE-LABEL: res_block:{{.*}}
+ ; CHECK-BE: [[ICMP2:%[0-9]+]] = icmp ult i64
+ ; CHECK-BE-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
+ ; CHECK-BE-NEXT: br label %endblock
+
+ ; CHECK-BE-LABEL: loadbb1:{{.*}}
+ ; CHECK-BE: [[BCC1:%[0-9]+]] = bitcast i32* {{.*}} to i8*
+ ; CHECK-BE-NEXT: [[BCC2:%[0-9]+]] = bitcast i32* {{.*}} to i8*
+ ; CHECK-BE-NEXT: [[GEP1:%[0-9]+]] = getelementptr i8, i8* [[BCC2]], i8 8
+ ; CHECK-BE-NEXT: [[BCL1:%[0-9]+]] = bitcast i8* [[GEP1]] to i64*
+ ; CHECK-BE-NEXT: [[GEP2:%[0-9]+]] = getelementptr i8, i8* [[BCC1]], i8 8
+ ; CHECK-BE-NEXT: [[BCL2:%[0-9]+]] = bitcast i8* [[GEP2]] to i64*
+ ; CHECK-BE-NEXT: [[LOAD1:%[0-9]+]] = load i64, i64* [[BCL1]]
+ ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64* [[BCL2]]
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %endblock, label %res_block
+
+ %0 = bitcast i32* %buffer1 to i8*
+ %1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 16)
+ ret i32 %call
+}
+
+declare signext i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1
+
+define signext i32 @test2(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+ ; CHECK-LABEL: @test2(
+ ; CHECK: [[LOAD1:%[0-9]+]] = load i32, i32*
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
+ ; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD1]])
+ ; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD2]])
+ ; CHECK-NEXT: [[CMP1:%[0-9]+]] = icmp ugt i32 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: [[CMP2:%[0-9]+]] = icmp ult i32 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: [[Z1:%[0-9]+]] = zext i1 [[CMP1]] to i32
+ ; CHECK-NEXT: [[Z2:%[0-9]+]] = zext i1 [[CMP2]] to i32
+ ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i32 [[Z1]], [[Z2]]
+ ; CHECK-NEXT: ret i32 [[SUB]]
+
+ ; CHECK-BE-LABEL: @test2(
+ ; CHECK-BE: [[LOAD1:%[0-9]+]] = load i32, i32*
+ ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
+ ; CHECK-BE-NEXT: [[CMP1:%[0-9]+]] = icmp ugt i32 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: [[CMP2:%[0-9]+]] = icmp ult i32 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: [[Z1:%[0-9]+]] = zext i1 [[CMP1]] to i32
+ ; CHECK-BE-NEXT: [[Z2:%[0-9]+]] = zext i1 [[CMP2]] to i32
+ ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i32 [[Z1]], [[Z2]]
+ ; CHECK-BE-NEXT: ret i32 [[SUB]]
+
+entry:
+ %0 = bitcast i32* %buffer1 to i8*
+ %1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 4)
+ ret i32 %call
+}
+
+define signext i32 @test3(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+ ; CHECK: [[LOAD1:%[0-9]+]] = load i64, i64*
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
+ ; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD1]])
+ ; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i64 @llvm.bswap.i64(i64 [[LOAD2]])
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[BSWAP1]], [[BSWAP2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
+
+ ; CHECK-LABEL: res_block:{{.*}}
+ ; CHECK: [[ICMP2:%[0-9]+]] = icmp ult i64
+ ; CHECK-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
+ ; CHECK-NEXT: br label %endblock
+
+ ; CHECK-LABEL: loadbb1:{{.*}}
+ ; CHECK: [[LOAD1:%[0-9]+]] = load i32, i32*
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
+ ; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD1]])
+ ; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i32 @llvm.bswap.i32(i32 [[LOAD2]])
+ ; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[BSWAP1]] to i64
+ ; CHECK-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[BSWAP2]] to i64
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb2, label %res_block
+
+ ; CHECK-LABEL: loadbb2:{{.*}}
+ ; CHECK: [[LOAD1:%[0-9]+]] = load i16, i16*
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i16, i16*
+ ; CHECK-NEXT: [[BSWAP1:%[0-9]+]] = call i16 @llvm.bswap.i16(i16 [[LOAD1]])
+ ; CHECK-NEXT: [[BSWAP2:%[0-9]+]] = call i16 @llvm.bswap.i16(i16 [[LOAD2]])
+ ; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i16 [[BSWAP1]] to i64
+ ; CHECK-NEXT: [[ZEXT2:%[0-9]+]] = zext i16 [[BSWAP2]] to i64
+ ; CHECK-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-NEXT: br i1 [[ICMP]], label %loadbb3, label %res_block
+
+ ; CHECK-LABEL: loadbb3:{{.*}}
+ ; CHECK: [[LOAD1:%[0-9]+]] = load i8, i8*
+ ; CHECK-NEXT: [[LOAD2:%[0-9]+]] = load i8, i8*
+ ; CHECK-NEXT: [[ZEXT1:%[0-9]+]] = zext i8 [[LOAD1]] to i32
+ ; CHECK-NEXT: [[ZEXT2:%[0-9]+]] = zext i8 [[LOAD2]] to i32
+ ; CHECK-NEXT: [[SUB:%[0-9]+]] = sub i32 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-NEXT: br label %endblock
+
+ ; CHECK-BE: [[LOAD1:%[0-9]+]] = load i64, i64*
+ ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i64, i64*
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[LOAD1]], [[LOAD2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb1, label %res_block
+
+ ; CHECK-BE-LABEL: res_block:{{.*}}
+ ; CHECK-BE: [[ICMP2:%[0-9]+]] = icmp ult i64
+ ; CHECK-BE-NEXT: [[SELECT:%[0-9]+]] = select i1 [[ICMP2]], i32 -1, i32 1
+ ; CHECK-BE-NEXT: br label %endblock
+
+ ; CHECK-BE: [[LOAD1:%[0-9]+]] = load i32, i32*
+ ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i32, i32*
+ ; CHECK-BE-NEXT: [[ZEXT1:%[0-9]+]] = zext i32 [[LOAD1]] to i64
+ ; CHECK-BE-NEXT: [[ZEXT2:%[0-9]+]] = zext i32 [[LOAD2]] to i64
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb2, label %res_block
+
+ ; CHECK-BE: [[LOAD1:%[0-9]+]] = load i16, i16*
+ ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i16, i16*
+ ; CHECK-BE-NEXT: [[ZEXT1:%[0-9]+]] = zext i16 [[LOAD1]] to i64
+ ; CHECK-BE-NEXT: [[ZEXT2:%[0-9]+]] = zext i16 [[LOAD2]] to i64
+ ; CHECK-BE-NEXT: [[ICMP:%[0-9]+]] = icmp eq i64 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-BE-NEXT: br i1 [[ICMP]], label %loadbb3, label %res_block
+
+ ; CHECK-BE: [[LOAD1:%[0-9]+]] = load i8, i8*
+ ; CHECK-BE-NEXT: [[LOAD2:%[0-9]+]] = load i8, i8*
+ ; CHECK-BE-NEXT: [[ZEXT1:%[0-9]+]] = zext i8 [[LOAD1]] to i32
+ ; CHECK-BE-NEXT: [[ZEXT2:%[0-9]+]] = zext i8 [[LOAD2]] to i32
+ ; CHECK-BE-NEXT: [[SUB:%[0-9]+]] = sub i32 [[ZEXT1]], [[ZEXT2]]
+ ; CHECK-BE-NEXT: br label %endblock
+
+entry:
+ %0 = bitcast i32* %buffer1 to i8*
+ %1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 15)
+ ret i32 %call
+}
+ ; CHECK: call = tail call signext i32 @memcmp
+ ; CHECK-BE: call = tail call signext i32 @memcmp
+define signext i32 @test4(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2) {
+
+entry:
+ %0 = bitcast i32* %buffer1 to i8*
+ %1 = bitcast i32* %buffer2 to i8*
+ %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 65)
+ ret i32 %call
+}
+
+define signext i32 @test5(i32* nocapture readonly %buffer1, i32* nocapture readonly %buffer2, i32 signext %SIZE) {
+ ; CHECK: call = tail call signext i32 @memcmp
+ ; CHECK-BE: call = tail call signext i32 @memcmp
+entry:
+ %0 = bitcast i32* %buffer1 to i8*
+ %1 = bitcast i32* %buffer2 to i8*
+ %conv = sext i32 %SIZE to i64
+ %call = tail call signext i32 @memcmp(i8* %0, i8* %1, i64 %conv)
+ ret i32 %call
+}
Modified: llvm/trunk/test/CodeGen/X86/O3-pipeline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/O3-pipeline.ll?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/X86/O3-pipeline.ll (original)
+++ llvm/trunk/test/CodeGen/X86/O3-pipeline.ll Wed Jun 26 05:13:13 2019
@@ -29,6 +29,10 @@
; CHECK-NEXT: Loop Pass Manager
; CHECK-NEXT: Induction Variable Users
; CHECK-NEXT: Loop Strength Reduction
+; CHECK-NEXT: Basic Alias Analysis (stateless AA impl)
+; CHECK-NEXT: Function Alias Analysis Results
+; CHECK-NEXT: Merge contiguous icmps into a memcmp
+; CHECK-NEXT: Expand memcmp() to load/stores
; CHECK-NEXT: Lower Garbage Collection Instructions
; CHECK-NEXT: Shadow Stack GC Lowering
; CHECK-NEXT: Remove unreachable blocks from the CFG
Added: llvm/trunk/test/CodeGen/X86/memcmp-mergeexpand.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcmp-mergeexpand.ll?rev=364416&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcmp-mergeexpand.ll (added)
+++ llvm/trunk/test/CodeGen/X86/memcmp-mergeexpand.ll Wed Jun 26 05:13:13 2019
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown | FileCheck %s --check-prefix=X86
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64
+
+; This tests interaction between MergeICmp and ExpandMemCmp.
+
+%"struct.std::pair" = type { i32, i32 }
+
+define zeroext i1 @opeq1(
+; X86-LABEL: opeq1:
+; X86: # %bb.0: # %"entry+land.rhs.i"
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: movl 4(%ecx), %ecx
+; X86-NEXT: xorl (%eax), %edx
+; X86-NEXT: xorl 4(%eax), %ecx
+; X86-NEXT: orl %edx, %ecx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: opeq1:
+; X64: # %bb.0: # %"entry+land.rhs.i"
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %"struct.std::pair"* nocapture readonly dereferenceable(8) %a,
+ %"struct.std::pair"* nocapture readonly dereferenceable(8) %b) local_unnamed_addr #0 {
+entry:
+ %first.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %a, i64 0, i32 0
+ %0 = load i32, i32* %first.i, align 4
+ %first1.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %b, i64 0, i32 0
+ %1 = load i32, i32* %first1.i, align 4
+ %cmp.i = icmp eq i32 %0, %1
+ br i1 %cmp.i, label %land.rhs.i, label %opeq1.exit
+
+land.rhs.i:
+ %second.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %a, i64 0, i32 1
+ %2 = load i32, i32* %second.i, align 4
+ %second2.i = getelementptr inbounds %"struct.std::pair", %"struct.std::pair"* %b, i64 0, i32 1
+ %3 = load i32, i32* %second2.i, align 4
+ %cmp3.i = icmp eq i32 %2, %3
+ br label %opeq1.exit
+
+opeq1.exit:
+ %4 = phi i1 [ false, %entry ], [ %cmp3.i, %land.rhs.i ]
+ ret i1 %4
+}
+
+
Added: llvm/trunk/test/CodeGen/X86/memcmp-optsize.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcmp-optsize.ll?rev=364416&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcmp-optsize.ll (added)
+++ llvm/trunk/test/CodeGen/X86/memcmp-optsize.ll Wed Jun 26 05:13:13 2019
@@ -0,0 +1,1013 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=cmov | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX2
+
+; This tests codegen time inlining/optimization of memcmp
+; rdar://6480398
+
+@.str = private constant [65 x i8] c"0123456789012345678901234567890123456789012345678901234567890123\00", align 1
+
+declare i32 @memcmp(i8*, i8*, i64)
+declare i32 @bcmp(i8*, i8*, i64)
+
+define i32 @length2(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: movzwl %cx, %eax
+; X86-NEXT: movzwl %dx, %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length2:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ ret i32 %m
+}
+
+define i1 @length2_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length2_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: cmpw (%eax), %cx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpw (%rsi), %ax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_eq_const(i8* %X) nounwind optsize {
+; X86-LABEL: length2_eq_const:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq_const:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 2) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length2_eq_nobuiltin_attr:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $2
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq_nobuiltin_attr:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $2, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind nobuiltin
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length3(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length3:
+; X86: # %bb.0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: movzwl (%ecx), %esi
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: rolw $8, %si
+; X86-NEXT: cmpw %si, %dx
+; X86-NEXT: jne .LBB4_1
+; X86-NEXT: # %bb.2: # %loadbb1
+; X86-NEXT: movzbl 2(%eax), %eax
+; X86-NEXT: movzbl 2(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: jmp .LBB4_3
+; X86-NEXT: .LBB4_1: # %res_block
+; X86-NEXT: setae %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: leal -1(%eax,%eax), %eax
+; X86-NEXT: .LBB4_3: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length3:
+; X64: # %bb.0: # %loadbb
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: cmpw %cx, %ax
+; X64-NEXT: jne .LBB4_1
+; X64-NEXT: # %bb.2: # %loadbb1
+; X64-NEXT: movzbl 2(%rdi), %eax
+; X64-NEXT: movzbl 2(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB4_1: # %res_block
+; X64-NEXT: setae %al
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+ ret i32 %m
+}
+
+define i1 @length3_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length3_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %edx
+; X86-NEXT: xorw (%eax), %dx
+; X86-NEXT: movb 2(%ecx), %cl
+; X86-NEXT: xorb 2(%eax), %cl
+; X86-NEXT: movzbl %cl, %eax
+; X86-NEXT: orw %dx, %ax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length3_eq:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: xorw (%rsi), %ax
+; X64-NEXT: movb 2(%rdi), %cl
+; X64-NEXT: xorb 2(%rsi), %cl
+; X64-NEXT: movzbl %cl, %ecx
+; X64-NEXT: orw %ax, %cx
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length4(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length4:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: seta %al
+; X86-NEXT: sbbl $0, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length4:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: movl (%rsi), %edx
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: bswapl %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl %edx, %ecx
+; X64-NEXT: seta %al
+; X64-NEXT: sbbl $0, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ ret i32 %m
+}
+
+define i1 @length4_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length4_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: cmpl (%eax), %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_eq:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: cmpl (%rsi), %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length4_eq_const(i8* %X) nounwind optsize {
+; X86-LABEL: length4_eq_const:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_eq_const:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 4) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length5(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length5:
+; X86: # %bb.0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl (%ecx), %esi
+; X86-NEXT: bswapl %edx
+; X86-NEXT: bswapl %esi
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: jne .LBB9_1
+; X86-NEXT: # %bb.2: # %loadbb1
+; X86-NEXT: movzbl 4(%eax), %eax
+; X86-NEXT: movzbl 4(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: jmp .LBB9_3
+; X86-NEXT: .LBB9_1: # %res_block
+; X86-NEXT: setae %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: leal -1(%eax,%eax), %eax
+; X86-NEXT: .LBB9_3: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length5:
+; X64: # %bb.0: # %loadbb
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: cmpl %ecx, %eax
+; X64-NEXT: jne .LBB9_1
+; X64-NEXT: # %bb.2: # %loadbb1
+; X64-NEXT: movzbl 4(%rdi), %eax
+; X64-NEXT: movzbl 4(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB9_1: # %res_block
+; X64-NEXT: setae %al
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+ ret i32 %m
+}
+
+define i1 @length5_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length5_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: xorl (%eax), %edx
+; X86-NEXT: movb 4(%ecx), %cl
+; X86-NEXT: xorb 4(%eax), %cl
+; X86-NEXT: movzbl %cl, %eax
+; X86-NEXT: orl %edx, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length5_eq:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: xorl (%rsi), %eax
+; X64-NEXT: movb 4(%rdi), %cl
+; X64-NEXT: xorb 4(%rsi), %cl
+; X64-NEXT: movzbl %cl, %ecx
+; X64-NEXT: orl %eax, %ecx
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length8(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length8:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB11_2
+; X86-NEXT: # %bb.1: # %loadbb1
+; X86-NEXT: movl 4(%esi), %ecx
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: je .LBB11_3
+; X86-NEXT: .LBB11_2: # %res_block
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: setae %al
+; X86-NEXT: leal -1(%eax,%eax), %eax
+; X86-NEXT: .LBB11_3: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length8:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: seta %al
+; X64-NEXT: sbbl $0, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+ ret i32 %m
+}
+
+define i1 @length8_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length8_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: movl 4(%ecx), %ecx
+; X86-NEXT: xorl (%eax), %edx
+; X86-NEXT: xorl 4(%eax), %ecx
+; X86-NEXT: orl %edx, %ecx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length8_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length8_eq_const(i8* %X) nounwind optsize {
+; X86-LABEL: length8_eq_const:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl $858927408, %ecx # imm = 0x33323130
+; X86-NEXT: xorl (%eax), %ecx
+; X86-NEXT: movl $926299444, %edx # imm = 0x37363534
+; X86-NEXT: xorl 4(%eax), %edx
+; X86-NEXT: orl %ecx, %edx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length8_eq_const:
+; X64: # %bb.0:
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 8) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length12_eq(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length12_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length12_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: xorq (%rsi), %rax
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: xorl 8(%rsi), %ecx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length12(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length12:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length12:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB15_2
+; X64-NEXT: # %bb.1: # %loadbb1
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: movl 8(%rsi), %edx
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: bswapl %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: je .LBB15_3
+; X64-NEXT: .LBB15_2: # %res_block
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: setae %al
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: .LBB15_3: # %endblock
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+ ret i32 %m
+}
+
+; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329
+
+define i32 @length16(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length16:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $16
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length16:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB16_2
+; X64-NEXT: # %bb.1: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 8(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: je .LBB16_3
+; X64-NEXT: .LBB16_2: # %res_block
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: setae %al
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: .LBB16_3: # %endblock
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind
+ ret i32 %m
+}
+
+define i1 @length16_eq(i8* %x, i8* %y) nounwind optsize {
+; X86-NOSSE-LABEL: length16_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu (%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X86-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length16_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length16_eq:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length16_eq_const(i8* %X) nounwind optsize {
+; X86-NOSSE-LABEL: length16_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length16_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length16_eq_const:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+; PR33914 - https://bugs.llvm.org/show_bug.cgi?id=33914
+
+define i32 @length24(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length24:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $24
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length24:
+; X64: # %bb.0:
+; X64-NEXT: movl $24, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind
+ ret i32 %m
+}
+
+define i1 @length24_eq(i8* %x, i8* %y) nounwind optsize {
+; X86-NOSSE-LABEL: length24_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $24
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length24_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 8(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 8(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: pand %xmm2, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length24_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X64-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X64-SSE2-NEXT: pand %xmm1, %xmm2
+; X64-SSE2-NEXT: pmovmskb %xmm2, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length24_eq:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX2-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
+; X64-AVX2-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length24_eq_const(i8* %X) nounwind optsize {
+; X86-NOSSE-LABEL: length24_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $24
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length24_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand %xmm1, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length24_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pand %xmm1, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length24_eq_const:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX2-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-AVX2-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 24) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length32(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length32:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length32:
+; X64: # %bb.0:
+; X64-NEXT: movl $32, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
+ ret i32 %m
+}
+
+; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325
+
+define i1 @length32_eq(i8* %x, i8* %y) nounwind optsize {
+; X86-NOSSE-LABEL: length32_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $32
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length32_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: pand %xmm2, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: pand %xmm2, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length32_eq_const(i8* %X) nounwind optsize {
+; X86-NOSSE-LABEL: length32_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $32
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE2-LABEL: length32_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand %xmm1, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pand %xmm1, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq_const:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length64(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: length64:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length64:
+; X64: # %bb.0:
+; X64-NEXT: movl $64, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
+ ret i32 %m
+}
+
+define i1 @length64_eq(i8* %x, i8* %y) nounwind optsize {
+; X86-LABEL: length64_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length64_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $64, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length64_eq:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
+; X64-AVX2-NEXT: vpcmpeqb 32(%rsi), %ymm1, %ymm1
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length64_eq_const(i8* %X) nounwind optsize {
+; X86-LABEL: length64_eq_const:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length64_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $.L.str, %esi
+; X64-SSE2-NEXT: movl $64, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX2-LABEL: length64_eq_const:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm1, %ymm1
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 64) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @bcmp_length2(i8* %X, i8* %Y) nounwind optsize {
+; X86-LABEL: bcmp_length2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: movzwl %cx, %eax
+; X86-NEXT: movzwl %dx, %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: bcmp_length2:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @bcmp(i8* %X, i8* %Y, i64 2) nounwind
+ ret i32 %m
+}
+
Added: llvm/trunk/test/CodeGen/X86/memcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/X86/memcmp.ll?rev=364416&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/X86/memcmp.ll (added)
+++ llvm/trunk/test/CodeGen/X86/memcmp.ll Wed Jun 26 05:13:13 2019
@@ -0,0 +1,1685 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=cmov | FileCheck %s --check-prefix=X86 --check-prefix=X86-NOSSE
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse | FileCheck %s --check-prefix=X86 --check-prefix=SSE --check-prefix=X86-SSE1
+; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X86 --check-prefix=SSE --check-prefix=X86-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefix=X64 --check-prefix=X64-SSE2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=X64 --check-prefix=X64-AVX --check-prefix=X64-AVX2
+
+; This tests codegen time inlining/optimization of memcmp
+; rdar://6480398
+
+@.str = private constant [65 x i8] c"0123456789012345678901234567890123456789012345678901234567890123\00", align 1
+
+declare i32 @memcmp(i8*, i8*, i64)
+
+define i32 @length0(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length0:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length0:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
+ ret i32 %m
+ }
+
+define i1 @length0_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length0_eq:
+; X86: # %bb.0:
+; X86-NEXT: movb $1, %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length0_eq:
+; X64: # %bb.0:
+; X64-NEXT: movb $1, %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length0_lt(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length0_lt:
+; X86: # %bb.0:
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length0_lt:
+; X64: # %bb.0:
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 0) nounwind
+ %c = icmp slt i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length2(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length2:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: movzwl %cx, %eax
+; X86-NEXT: movzwl %dx, %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length2:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ ret i32 %m
+}
+
+define i1 @length2_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length2_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: cmpw (%eax), %cx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpw (%rsi), %ax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_lt(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length2_lt:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: movzwl %cx, %eax
+; X86-NEXT: movzwl %dx, %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: shrl $31, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_lt:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: shrl $31, %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ %c = icmp slt i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_gt(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length2_gt:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %ecx
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: rolw $8, %cx
+; X86-NEXT: rolw $8, %ax
+; X86-NEXT: movzwl %cx, %ecx
+; X86-NEXT: movzwl %ax, %eax
+; X86-NEXT: subl %eax, %ecx
+; X86-NEXT: testl %ecx, %ecx
+; X86-NEXT: setg %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_gt:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: movzwl %ax, %eax
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: setg %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind
+ %c = icmp sgt i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_eq_const(i8* %X) nounwind {
+; X86-LABEL: length2_eq_const:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %eax
+; X86-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq_const:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: cmpl $12849, %eax # imm = 0x3231
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 2) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length2_eq_nobuiltin_attr(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length2_eq_nobuiltin_attr:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $2
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length2_eq_nobuiltin_attr:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movl $2, %edx
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 2) nounwind nobuiltin
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length3(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length3:
+; X86: # %bb.0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movzwl (%eax), %edx
+; X86-NEXT: movzwl (%ecx), %esi
+; X86-NEXT: rolw $8, %dx
+; X86-NEXT: rolw $8, %si
+; X86-NEXT: cmpw %si, %dx
+; X86-NEXT: jne .LBB9_1
+; X86-NEXT: # %bb.2: # %loadbb1
+; X86-NEXT: movzbl 2(%eax), %eax
+; X86-NEXT: movzbl 2(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB9_1: # %res_block
+; X86-NEXT: setae %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: leal -1(%eax,%eax), %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length3:
+; X64: # %bb.0: # %loadbb
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: movzwl (%rsi), %ecx
+; X64-NEXT: rolw $8, %ax
+; X64-NEXT: rolw $8, %cx
+; X64-NEXT: cmpw %cx, %ax
+; X64-NEXT: jne .LBB9_1
+; X64-NEXT: # %bb.2: # %loadbb1
+; X64-NEXT: movzbl 2(%rdi), %eax
+; X64-NEXT: movzbl 2(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB9_1: # %res_block
+; X64-NEXT: setae %al
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+ ret i32 %m
+}
+
+define i1 @length3_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length3_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movzwl (%ecx), %edx
+; X86-NEXT: xorw (%eax), %dx
+; X86-NEXT: movb 2(%ecx), %cl
+; X86-NEXT: xorb 2(%eax), %cl
+; X86-NEXT: movzbl %cl, %eax
+; X86-NEXT: orw %dx, %ax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length3_eq:
+; X64: # %bb.0:
+; X64-NEXT: movzwl (%rdi), %eax
+; X64-NEXT: xorw (%rsi), %ax
+; X64-NEXT: movb 2(%rdi), %cl
+; X64-NEXT: xorb 2(%rsi), %cl
+; X64-NEXT: movzbl %cl, %ecx
+; X64-NEXT: orw %ax, %cx
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 3) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length4(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length4:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: seta %al
+; X86-NEXT: sbbl $0, %eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length4:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: movl (%rsi), %edx
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: bswapl %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl %edx, %ecx
+; X64-NEXT: seta %al
+; X64-NEXT: sbbl $0, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ ret i32 %m
+}
+
+define i1 @length4_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length4_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: cmpl (%eax), %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_eq:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: cmpl (%rsi), %eax
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length4_lt(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length4_lt:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: seta %al
+; X86-NEXT: sbbl $0, %eax
+; X86-NEXT: shrl $31, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_lt:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %ecx
+; X64-NEXT: movl (%rsi), %edx
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: bswapl %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpl %edx, %ecx
+; X64-NEXT: seta %al
+; X64-NEXT: sbbl $0, %eax
+; X64-NEXT: shrl $31, %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ %c = icmp slt i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length4_gt(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length4_gt:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %ecx
+; X86-NEXT: movl (%eax), %eax
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %eax
+; X86-NEXT: xorl %edx, %edx
+; X86-NEXT: cmpl %eax, %ecx
+; X86-NEXT: seta %dl
+; X86-NEXT: sbbl $0, %edx
+; X86-NEXT: testl %edx, %edx
+; X86-NEXT: setg %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_gt:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: xorl %edx, %edx
+; X64-NEXT: cmpl %ecx, %eax
+; X64-NEXT: seta %dl
+; X64-NEXT: sbbl $0, %edx
+; X64-NEXT: testl %edx, %edx
+; X64-NEXT: setg %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 4) nounwind
+ %c = icmp sgt i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length4_eq_const(i8* %X) nounwind {
+; X86-LABEL: length4_eq_const:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: cmpl $875770417, (%eax) # imm = 0x34333231
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length4_eq_const:
+; X64: # %bb.0:
+; X64-NEXT: cmpl $875770417, (%rdi) # imm = 0x34333231
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 1), i64 4) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length5(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length5:
+; X86: # %bb.0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl (%ecx), %esi
+; X86-NEXT: bswapl %edx
+; X86-NEXT: bswapl %esi
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: jne .LBB16_1
+; X86-NEXT: # %bb.2: # %loadbb1
+; X86-NEXT: movzbl 4(%eax), %eax
+; X86-NEXT: movzbl 4(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+; X86-NEXT: .LBB16_1: # %res_block
+; X86-NEXT: setae %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: leal -1(%eax,%eax), %eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length5:
+; X64: # %bb.0: # %loadbb
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: cmpl %ecx, %eax
+; X64-NEXT: jne .LBB16_1
+; X64-NEXT: # %bb.2: # %loadbb1
+; X64-NEXT: movzbl 4(%rdi), %eax
+; X64-NEXT: movzbl 4(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB16_1: # %res_block
+; X64-NEXT: setae %al
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+ ret i32 %m
+}
+
+define i1 @length5_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length5_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: xorl (%eax), %edx
+; X86-NEXT: movb 4(%ecx), %cl
+; X86-NEXT: xorb 4(%eax), %cl
+; X86-NEXT: movzbl %cl, %eax
+; X86-NEXT: orl %edx, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length5_eq:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: xorl (%rsi), %eax
+; X64-NEXT: movb 4(%rdi), %cl
+; X64-NEXT: xorb 4(%rsi), %cl
+; X64-NEXT: movzbl %cl, %ecx
+; X64-NEXT: orl %eax, %ecx
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length5_lt(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length5_lt:
+; X86: # %bb.0: # %loadbb
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: movl (%ecx), %esi
+; X86-NEXT: bswapl %edx
+; X86-NEXT: bswapl %esi
+; X86-NEXT: cmpl %esi, %edx
+; X86-NEXT: jne .LBB18_1
+; X86-NEXT: # %bb.2: # %loadbb1
+; X86-NEXT: movzbl 4(%eax), %eax
+; X86-NEXT: movzbl 4(%ecx), %ecx
+; X86-NEXT: subl %ecx, %eax
+; X86-NEXT: jmp .LBB18_3
+; X86-NEXT: .LBB18_1: # %res_block
+; X86-NEXT: setae %al
+; X86-NEXT: movzbl %al, %eax
+; X86-NEXT: leal -1(%eax,%eax), %eax
+; X86-NEXT: .LBB18_3: # %endblock
+; X86-NEXT: shrl $31, %eax
+; X86-NEXT: # kill: def $al killed $al killed $eax
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length5_lt:
+; X64: # %bb.0: # %loadbb
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl (%rsi), %ecx
+; X64-NEXT: bswapl %eax
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: cmpl %ecx, %eax
+; X64-NEXT: jne .LBB18_1
+; X64-NEXT: # %bb.2: # %loadbb1
+; X64-NEXT: movzbl 4(%rdi), %eax
+; X64-NEXT: movzbl 4(%rsi), %ecx
+; X64-NEXT: subl %ecx, %eax
+; X64-NEXT: shrl $31, %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: retq
+; X64-NEXT: .LBB18_1: # %res_block
+; X64-NEXT: setae %al
+; X64-NEXT: movzbl %al, %eax
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: shrl $31, %eax
+; X64-NEXT: # kill: def $al killed $al killed $eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 5) nounwind
+ %c = icmp slt i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length7_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length7_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: movl 3(%ecx), %ecx
+; X86-NEXT: xorl (%eax), %edx
+; X86-NEXT: xorl 3(%eax), %ecx
+; X86-NEXT: orl %edx, %ecx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length7_eq:
+; X64: # %bb.0:
+; X64-NEXT: movl (%rdi), %eax
+; X64-NEXT: movl 3(%rdi), %ecx
+; X64-NEXT: xorl (%rsi), %eax
+; X64-NEXT: xorl 3(%rsi), %ecx
+; X64-NEXT: orl %eax, %ecx
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 7) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length8(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length8:
+; X86: # %bb.0:
+; X86-NEXT: pushl %esi
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %esi
+; X86-NEXT: movl (%esi), %ecx
+; X86-NEXT: movl (%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: jne .LBB20_2
+; X86-NEXT: # %bb.1: # %loadbb1
+; X86-NEXT: movl 4(%esi), %ecx
+; X86-NEXT: movl 4(%eax), %edx
+; X86-NEXT: bswapl %ecx
+; X86-NEXT: bswapl %edx
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: je .LBB20_3
+; X86-NEXT: .LBB20_2: # %res_block
+; X86-NEXT: xorl %eax, %eax
+; X86-NEXT: cmpl %edx, %ecx
+; X86-NEXT: setae %al
+; X86-NEXT: leal -1(%eax,%eax), %eax
+; X86-NEXT: .LBB20_3: # %endblock
+; X86-NEXT: popl %esi
+; X86-NEXT: retl
+;
+; X64-LABEL: length8:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: seta %al
+; X64-NEXT: sbbl $0, %eax
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+ ret i32 %m
+}
+
+define i1 @length8_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length8_eq:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT: movl (%ecx), %edx
+; X86-NEXT: movl 4(%ecx), %ecx
+; X86-NEXT: xorl (%eax), %edx
+; X86-NEXT: xorl 4(%eax), %ecx
+; X86-NEXT: orl %edx, %ecx
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length8_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: cmpq (%rsi), %rax
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 8) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length8_eq_const(i8* %X) nounwind {
+; X86-LABEL: length8_eq_const:
+; X86: # %bb.0:
+; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-NEXT: movl $858927408, %ecx # imm = 0x33323130
+; X86-NEXT: xorl (%eax), %ecx
+; X86-NEXT: movl $926299444, %edx # imm = 0x37363534
+; X86-NEXT: xorl 4(%eax), %edx
+; X86-NEXT: orl %ecx, %edx
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length8_eq_const:
+; X64: # %bb.0:
+; X64-NEXT: movabsq $3978425819141910832, %rax # imm = 0x3736353433323130
+; X64-NEXT: cmpq %rax, (%rdi)
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 8) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length9_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length9_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $9
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length9_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: xorq (%rsi), %rax
+; X64-NEXT: movb 8(%rdi), %cl
+; X64-NEXT: xorb 8(%rsi), %cl
+; X64-NEXT: movzbl %cl, %ecx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length10_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length10_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $10
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length10_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: xorq (%rsi), %rax
+; X64-NEXT: movzwl 8(%rdi), %ecx
+; X64-NEXT: xorw 8(%rsi), %cx
+; X64-NEXT: movzwl %cx, %ecx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 10) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length11_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length11_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $11
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length11_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: movq 3(%rdi), %rcx
+; X64-NEXT: xorq (%rsi), %rax
+; X64-NEXT: xorq 3(%rsi), %rcx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 11) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length12_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length12_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length12_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: xorq (%rsi), %rax
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: xorl 8(%rsi), %ecx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: setne %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length12(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length12:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $12
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length12:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB27_2
+; X64-NEXT: # %bb.1: # %loadbb1
+; X64-NEXT: movl 8(%rdi), %ecx
+; X64-NEXT: movl 8(%rsi), %edx
+; X64-NEXT: bswapl %ecx
+; X64-NEXT: bswapl %edx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: je .LBB27_3
+; X64-NEXT: .LBB27_2: # %res_block
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: setae %al
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: .LBB27_3: # %endblock
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 12) nounwind
+ ret i32 %m
+}
+
+define i1 @length13_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length13_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $13
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length13_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: movq 5(%rdi), %rcx
+; X64-NEXT: xorq (%rsi), %rax
+; X64-NEXT: xorq 5(%rsi), %rcx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 13) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length14_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length14_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $14
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length14_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: movq 6(%rdi), %rcx
+; X64-NEXT: xorq (%rsi), %rax
+; X64-NEXT: xorq 6(%rsi), %rcx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 14) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+define i1 @length15_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length15_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $15
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: length15_eq:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rax
+; X64-NEXT: movq 7(%rdi), %rcx
+; X64-NEXT: xorq (%rsi), %rax
+; X64-NEXT: xorq 7(%rsi), %rcx
+; X64-NEXT: orq %rax, %rcx
+; X64-NEXT: sete %al
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 15) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+; PR33329 - https://bugs.llvm.org/show_bug.cgi?id=33329
+
+define i32 @length16(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length16:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $16
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length16:
+; X64: # %bb.0:
+; X64-NEXT: movq (%rdi), %rcx
+; X64-NEXT: movq (%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: jne .LBB31_2
+; X64-NEXT: # %bb.1: # %loadbb1
+; X64-NEXT: movq 8(%rdi), %rcx
+; X64-NEXT: movq 8(%rsi), %rdx
+; X64-NEXT: bswapq %rcx
+; X64-NEXT: bswapq %rdx
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: je .LBB31_3
+; X64-NEXT: .LBB31_2: # %res_block
+; X64-NEXT: xorl %eax, %eax
+; X64-NEXT: cmpq %rdx, %rcx
+; X64-NEXT: setae %al
+; X64-NEXT: leal -1(%rax,%rax), %eax
+; X64-NEXT: .LBB31_3: # %endblock
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 16) nounwind
+ ret i32 %m
+}
+
+define i1 @length16_eq(i8* %x, i8* %y) nounwind {
+; X86-NOSSE-LABEL: length16_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length16_eq:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $16
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: setne %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu (%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X86-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length16_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X64-SSE2-NEXT: pmovmskb %xmm1, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX-LABEL: length16_eq:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX-NEXT: setne %al
+; X64-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 16) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length16_eq_const(i8* %X) nounwind {
+; X86-NOSSE-LABEL: length16_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $16
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length16_eq_const:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $16
+; X86-SSE1-NEXT: pushl $.L.str
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length16_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length16_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX-LABEL: length16_eq_const:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX-NEXT: sete %al
+; X64-AVX-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 16) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+; PR33914 - https://bugs.llvm.org/show_bug.cgi?id=33914
+
+define i32 @length24(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length24:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $24
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length24:
+; X64: # %bb.0:
+; X64-NEXT: movl $24, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 24) nounwind
+ ret i32 %m
+}
+
+define i1 @length24_eq(i8* %x, i8* %y) nounwind {
+; X86-NOSSE-LABEL: length24_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $24
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length24_eq:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $24
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length24_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 8(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 8(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: pand %xmm2, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length24_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
+; X64-SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X64-SSE2-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X64-SSE2-NEXT: pand %xmm1, %xmm2
+; X64-SSE2-NEXT: pmovmskb %xmm2, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX-LABEL: length24_eq:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero
+; X64-AVX-NEXT: vpcmpeqb %xmm2, %xmm1, %xmm1
+; X64-AVX-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; X64-AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX-NEXT: sete %al
+; X64-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 24) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length24_eq_const(i8* %X) nounwind {
+; X86-NOSSE-LABEL: length24_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $24
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length24_eq_const:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $24
+; X86-SSE1-NEXT: pushl $.L.str
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: setne %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length24_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: movdqu 8(%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand %xmm1, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length24_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pand %xmm1, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX-LABEL: length24_eq_const:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
+; X64-AVX-NEXT: vpcmpeqb {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX-NEXT: setne %al
+; X64-AVX-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 24) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length32(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length32:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $32
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length32:
+; X64: # %bb.0:
+; X64-NEXT: movl $32, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 32) nounwind
+ ret i32 %m
+}
+
+; PR33325 - https://bugs.llvm.org/show_bug.cgi?id=33325
+
+define i1 @length32_eq(i8* %x, i8* %y) nounwind {
+; X86-NOSSE-LABEL: length32_eq:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $32
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length32_eq:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $32
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length32_eq:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: pand %xmm2, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: pand %xmm2, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX1-LABEL: length32_eq:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX1-NEXT: vpcmpeqb 16(%rsi), %xmm1, %xmm1
+; X64-AVX1-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; X64-AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX1-NEXT: sete %al
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length32_eq_prefer128(i8* %x, i8* %y) nounwind "prefer-vector-width"="128" {
+; X86-NOSSE-LABEL: length32_eq_prefer128:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $32
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: sete %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length32_eq_prefer128:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $32
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: sete %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length32_eq_prefer128:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT: movdqu (%ecx), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%ecx), %xmm1
+; X86-SSE2-NEXT: movdqu (%eax), %xmm2
+; X86-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm0
+; X86-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X86-SSE2-NEXT: pand %xmm2, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: sete %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq_prefer128:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: movdqu (%rsi), %xmm2
+; X64-SSE2-NEXT: pcmpeqb %xmm0, %xmm2
+; X64-SSE2-NEXT: movdqu 16(%rsi), %xmm0
+; X64-SSE2-NEXT: pcmpeqb %xmm1, %xmm0
+; X64-SSE2-NEXT: pand %xmm2, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX-LABEL: length32_eq_prefer128:
+; X64-AVX: # %bb.0:
+; X64-AVX-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX-NEXT: vpcmpeqb 16(%rsi), %xmm1, %xmm1
+; X64-AVX-NEXT: vpcmpeqb (%rsi), %xmm0, %xmm0
+; X64-AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-AVX-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX-NEXT: sete %al
+; X64-AVX-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32) nounwind
+ %cmp = icmp eq i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length32_eq_const(i8* %X) nounwind {
+; X86-NOSSE-LABEL: length32_eq_const:
+; X86-NOSSE: # %bb.0:
+; X86-NOSSE-NEXT: pushl $0
+; X86-NOSSE-NEXT: pushl $32
+; X86-NOSSE-NEXT: pushl $.L.str
+; X86-NOSSE-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NOSSE-NEXT: calll memcmp
+; X86-NOSSE-NEXT: addl $16, %esp
+; X86-NOSSE-NEXT: testl %eax, %eax
+; X86-NOSSE-NEXT: setne %al
+; X86-NOSSE-NEXT: retl
+;
+; X86-SSE1-LABEL: length32_eq_const:
+; X86-SSE1: # %bb.0:
+; X86-SSE1-NEXT: pushl $0
+; X86-SSE1-NEXT: pushl $32
+; X86-SSE1-NEXT: pushl $.L.str
+; X86-SSE1-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-SSE1-NEXT: calll memcmp
+; X86-SSE1-NEXT: addl $16, %esp
+; X86-SSE1-NEXT: testl %eax, %eax
+; X86-SSE1-NEXT: setne %al
+; X86-SSE1-NEXT: retl
+;
+; X86-SSE2-LABEL: length32_eq_const:
+; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT: movdqu (%eax), %xmm0
+; X86-SSE2-NEXT: movdqu 16(%eax), %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm1
+; X86-SSE2-NEXT: pcmpeqb {{\.LCPI.*}}, %xmm0
+; X86-SSE2-NEXT: pand %xmm1, %xmm0
+; X86-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X86-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X86-SSE2-NEXT: setne %al
+; X86-SSE2-NEXT: retl
+;
+; X64-SSE2-LABEL: length32_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: movdqu (%rdi), %xmm0
+; X64-SSE2-NEXT: movdqu 16(%rdi), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm1
+; X64-SSE2-NEXT: pcmpeqb {{.*}}(%rip), %xmm0
+; X64-SSE2-NEXT: pand %xmm1, %xmm0
+; X64-SSE2-NEXT: pmovmskb %xmm0, %eax
+; X64-SSE2-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX1-LABEL: length32_eq_const:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: vmovdqu (%rdi), %xmm0
+; X64-AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
+; X64-AVX1-NEXT: vpcmpeqb {{.*}}(%rip), %xmm1, %xmm1
+; X64-AVX1-NEXT: vpcmpeqb {{.*}}(%rip), %xmm0, %xmm0
+; X64-AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
+; X64-AVX1-NEXT: vpmovmskb %xmm0, %eax
+; X64-AVX1-NEXT: cmpl $65535, %eax # imm = 0xFFFF
+; X64-AVX1-NEXT: setne %al
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length32_eq_const:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 32) nounwind
+ %c = icmp ne i32 %m, 0
+ ret i1 %c
+}
+
+define i32 @length64(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: length64:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: length64:
+; X64: # %bb.0:
+; X64-NEXT: movl $64, %edx
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 64) nounwind
+ ret i32 %m
+}
+
+define i1 @length64_eq(i8* %x, i8* %y) nounwind {
+; X86-LABEL: length64_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: setne %al
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length64_eq:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $64, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: setne %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX1-LABEL: length64_eq:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: pushq %rax
+; X64-AVX1-NEXT: movl $64, %edx
+; X64-AVX1-NEXT: callq memcmp
+; X64-AVX1-NEXT: testl %eax, %eax
+; X64-AVX1-NEXT: setne %al
+; X64-AVX1-NEXT: popq %rcx
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length64_eq:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
+; X64-AVX2-NEXT: vpcmpeqb 32(%rsi), %ymm1, %ymm1
+; X64-AVX2-NEXT: vpcmpeqb (%rsi), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: setne %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 64) nounwind
+ %cmp = icmp ne i32 %call, 0
+ ret i1 %cmp
+}
+
+define i1 @length64_eq_const(i8* %X) nounwind {
+; X86-LABEL: length64_eq_const:
+; X86: # %bb.0:
+; X86-NEXT: pushl $0
+; X86-NEXT: pushl $64
+; X86-NEXT: pushl $.L.str
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-SSE2-LABEL: length64_eq_const:
+; X64-SSE2: # %bb.0:
+; X64-SSE2-NEXT: pushq %rax
+; X64-SSE2-NEXT: movl $.L.str, %esi
+; X64-SSE2-NEXT: movl $64, %edx
+; X64-SSE2-NEXT: callq memcmp
+; X64-SSE2-NEXT: testl %eax, %eax
+; X64-SSE2-NEXT: sete %al
+; X64-SSE2-NEXT: popq %rcx
+; X64-SSE2-NEXT: retq
+;
+; X64-AVX1-LABEL: length64_eq_const:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: pushq %rax
+; X64-AVX1-NEXT: movl $.L.str, %esi
+; X64-AVX1-NEXT: movl $64, %edx
+; X64-AVX1-NEXT: callq memcmp
+; X64-AVX1-NEXT: testl %eax, %eax
+; X64-AVX1-NEXT: sete %al
+; X64-AVX1-NEXT: popq %rcx
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: length64_eq_const:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: vmovdqu (%rdi), %ymm0
+; X64-AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm1, %ymm1
+; X64-AVX2-NEXT: vpcmpeqb {{.*}}(%rip), %ymm0, %ymm0
+; X64-AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
+; X64-AVX2-NEXT: vpmovmskb %ymm0, %eax
+; X64-AVX2-NEXT: cmpl $-1, %eax
+; X64-AVX2-NEXT: sete %al
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* getelementptr inbounds ([65 x i8], [65 x i8]* @.str, i32 0, i32 0), i64 64) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+; This checks that we do not do stupid things with huge sizes.
+define i32 @huge_length(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: huge_length:
+; X86: # %bb.0:
+; X86-NEXT: pushl $2147483647 # imm = 0x7FFFFFFF
+; X86-NEXT: pushl $-1
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: retl
+;
+; X64-LABEL: huge_length:
+; X64: # %bb.0:
+; X64-NEXT: movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind
+ ret i32 %m
+}
+
+define i1 @huge_length_eq(i8* %X, i8* %Y) nounwind {
+; X86-LABEL: huge_length_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl $2147483647 # imm = 0x7FFFFFFF
+; X86-NEXT: pushl $-1
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: huge_length_eq:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: movabsq $9223372036854775807, %rdx # imm = 0x7FFFFFFFFFFFFFFF
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 9223372036854775807) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
+
+; This checks non-constant sizes.
+define i32 @nonconst_length(i8* %X, i8* %Y, i64 %size) nounwind {
+; X86-LABEL: nonconst_length:
+; X86: # %bb.0:
+; X86-NEXT: jmp memcmp # TAILCALL
+;
+; X64-LABEL: nonconst_length:
+; X64: # %bb.0:
+; X64-NEXT: jmp memcmp # TAILCALL
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 %size) nounwind
+ ret i32 %m
+}
+
+define i1 @nonconst_length_eq(i8* %X, i8* %Y, i64 %size) nounwind {
+; X86-LABEL: nonconst_length_eq:
+; X86: # %bb.0:
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: pushl {{[0-9]+}}(%esp)
+; X86-NEXT: calll memcmp
+; X86-NEXT: addl $16, %esp
+; X86-NEXT: testl %eax, %eax
+; X86-NEXT: sete %al
+; X86-NEXT: retl
+;
+; X64-LABEL: nonconst_length_eq:
+; X64: # %bb.0:
+; X64-NEXT: pushq %rax
+; X64-NEXT: callq memcmp
+; X64-NEXT: testl %eax, %eax
+; X64-NEXT: sete %al
+; X64-NEXT: popq %rcx
+; X64-NEXT: retq
+ %m = tail call i32 @memcmp(i8* %X, i8* %Y, i64 %size) nounwind
+ %c = icmp eq i32 %m, 0
+ ret i1 %c
+}
Modified: llvm/trunk/test/Other/opt-O2-pipeline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Other/opt-O2-pipeline.ll?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/test/Other/opt-O2-pipeline.ll (original)
+++ llvm/trunk/test/Other/opt-O2-pipeline.ll Wed Jun 26 05:13:13 2019
@@ -136,11 +136,6 @@
; CHECK-NEXT: Function Alias Analysis Results
; CHECK-NEXT: Memory Dependence Analysis
; CHECK-NEXT: MemCpy Optimization
-; CHECK-NEXT: Basic Alias Analysis (stateless AA impl)
-; CHECK-NEXT: Function Alias Analysis Results
-; CHECK-NEXT: Merge contiguous icmps into a memcmp
-; CHECK-NEXT: Expand memcmp() to load/stores
-; CHECK-NEXT: Early CSE
; CHECK-NEXT: Sparse Conditional Constant Propagation
; CHECK-NEXT: Demanded bits analysis
; CHECK-NEXT: Bit-Tracking Dead Code Elimination
Modified: llvm/trunk/test/Other/opt-O3-pipeline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Other/opt-O3-pipeline.ll?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/test/Other/opt-O3-pipeline.ll (original)
+++ llvm/trunk/test/Other/opt-O3-pipeline.ll Wed Jun 26 05:13:13 2019
@@ -141,11 +141,6 @@
; CHECK-NEXT: Function Alias Analysis Results
; CHECK-NEXT: Memory Dependence Analysis
; CHECK-NEXT: MemCpy Optimization
-; CHECK-NEXT: Basic Alias Analysis (stateless AA impl)
-; CHECK-NEXT: Function Alias Analysis Results
-; CHECK-NEXT: Merge contiguous icmps into a memcmp
-; CHECK-NEXT: Expand memcmp() to load/stores
-; CHECK-NEXT: Early CSE
; CHECK-NEXT: Sparse Conditional Constant Propagation
; CHECK-NEXT: Demanded bits analysis
; CHECK-NEXT: Bit-Tracking Dead Code Elimination
Modified: llvm/trunk/test/Other/opt-Os-pipeline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Other/opt-Os-pipeline.ll?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/test/Other/opt-Os-pipeline.ll (original)
+++ llvm/trunk/test/Other/opt-Os-pipeline.ll Wed Jun 26 05:13:13 2019
@@ -123,11 +123,6 @@
; CHECK-NEXT: Function Alias Analysis Results
; CHECK-NEXT: Memory Dependence Analysis
; CHECK-NEXT: MemCpy Optimization
-; CHECK-NEXT: Basic Alias Analysis (stateless AA impl)
-; CHECK-NEXT: Function Alias Analysis Results
-; CHECK-NEXT: Merge contiguous icmps into a memcmp
-; CHECK-NEXT: Expand memcmp() to load/stores
-; CHECK-NEXT: Early CSE
; CHECK-NEXT: Sparse Conditional Constant Propagation
; CHECK-NEXT: Demanded bits analysis
; CHECK-NEXT: Bit-Tracking Dead Code Elimination
Modified: llvm/trunk/test/Transforms/ExpandMemCmp/X86/memcmp.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ExpandMemCmp/X86/memcmp.ll?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ExpandMemCmp/X86/memcmp.ll (original)
+++ llvm/trunk/test/Transforms/ExpandMemCmp/X86/memcmp.ll Wed Jun 26 05:13:13 2019
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt -S -domtree -expandmemcmp -verify-dom-info -mtriple=i686-unknown-unknown -data-layout=e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X32
-; RUN: opt -S -domtree -expandmemcmp -verify-dom-info -memcmp-num-loads-per-block=1 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 -mattr=+avx2 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_1LD
-; RUN: opt -S -domtree -expandmemcmp -verify-dom-info -memcmp-num-loads-per-block=2 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 -mattr=+avx2 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_2LD
+; RUN: opt -S -expandmemcmp -mtriple=i686-unknown-unknown -data-layout=e-m:o-p:32:32-f64:32:64-f80:128-n8:16:32-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X32
+; RUN: opt -S -expandmemcmp -memcmp-num-loads-per-block=1 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_1LD
+; RUN: opt -S -expandmemcmp -memcmp-num-loads-per-block=2 -mtriple=x86_64-unknown-unknown -data-layout=e-m:o-i64:64-f80:128-n8:16:32:64-S128 < %s | FileCheck %s --check-prefix=ALL --check-prefix=X64 --check-prefix=X64_2LD
declare i32 @memcmp(i8* nocapture, i8* nocapture, i64)
@@ -1215,88 +1215,5 @@ define i32 @cmp_eq16(i8* nocapture reado
%cmp = icmp eq i32 %call, 0
%conv = zext i1 %cmp to i32
ret i32 %conv
-}
-
-define i32 @cmp_eq32(i8* nocapture readonly %x, i8* nocapture readonly %y) {
-; X32-LABEL: @cmp_eq32(
-; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 32)
-; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; X32-NEXT: ret i32 [[CONV]]
-;
-; X64-LABEL: @cmp_eq32(
-; X64-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i256*
-; X64-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i256*
-; X64-NEXT: [[TMP3:%.*]] = load i256, i256* [[TMP1]]
-; X64-NEXT: [[TMP4:%.*]] = load i256, i256* [[TMP2]]
-; X64-NEXT: [[TMP5:%.*]] = icmp ne i256 [[TMP3]], [[TMP4]]
-; X64-NEXT: [[TMP6:%.*]] = zext i1 [[TMP5]] to i32
-; X64-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP6]], 0
-; X64-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; X64-NEXT: ret i32 [[CONV]]
-;
- %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32)
- %cmp = icmp eq i32 %call, 0
- %conv = zext i1 %cmp to i32
- ret i32 %conv
-}
-
-define i32 @cmp_eq32_prefer128(i8* nocapture readonly %x, i8* nocapture readonly %y) "prefer-vector-width"="128" {
-; X32-LABEL: @cmp_eq32_prefer128(
-; X32-NEXT: [[CALL:%.*]] = tail call i32 @memcmp(i8* [[X:%.*]], i8* [[Y:%.*]], i64 32)
-; X32-NEXT: [[CMP:%.*]] = icmp eq i32 [[CALL]], 0
-; X32-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; X32-NEXT: ret i32 [[CONV]]
-;
-; X64_1LD-LABEL: @cmp_eq32_prefer128(
-; X64_1LD-NEXT: br label [[LOADBB:%.*]]
-; X64_1LD: res_block:
-; X64_1LD-NEXT: br label [[ENDBLOCK:%.*]]
-; X64_1LD: loadbb:
-; X64_1LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
-; X64_1LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128*
-; X64_1LD-NEXT: [[TMP3:%.*]] = load i128, i128* [[TMP1]]
-; X64_1LD-NEXT: [[TMP4:%.*]] = load i128, i128* [[TMP2]]
-; X64_1LD-NEXT: [[TMP5:%.*]] = icmp ne i128 [[TMP3]], [[TMP4]]
-; X64_1LD-NEXT: br i1 [[TMP5]], label [[RES_BLOCK:%.*]], label [[LOADBB1:%.*]]
-; X64_1LD: loadbb1:
-; X64_1LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 16
-; X64_1LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i128*
-; X64_1LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 16
-; X64_1LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i128*
-; X64_1LD-NEXT: [[TMP10:%.*]] = load i128, i128* [[TMP7]]
-; X64_1LD-NEXT: [[TMP11:%.*]] = load i128, i128* [[TMP9]]
-; X64_1LD-NEXT: [[TMP12:%.*]] = icmp ne i128 [[TMP10]], [[TMP11]]
-; X64_1LD-NEXT: br i1 [[TMP12]], label [[RES_BLOCK]], label [[ENDBLOCK]]
-; X64_1LD: endblock:
-; X64_1LD-NEXT: [[PHI_RES:%.*]] = phi i32 [ 0, [[LOADBB1]] ], [ 1, [[RES_BLOCK]] ]
-; X64_1LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[PHI_RES]], 0
-; X64_1LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; X64_1LD-NEXT: ret i32 [[CONV]]
-;
-; X64_2LD-LABEL: @cmp_eq32_prefer128(
-; X64_2LD-NEXT: [[TMP1:%.*]] = bitcast i8* [[X:%.*]] to i128*
-; X64_2LD-NEXT: [[TMP2:%.*]] = bitcast i8* [[Y:%.*]] to i128*
-; X64_2LD-NEXT: [[TMP3:%.*]] = load i128, i128* [[TMP1]]
-; X64_2LD-NEXT: [[TMP4:%.*]] = load i128, i128* [[TMP2]]
-; X64_2LD-NEXT: [[TMP5:%.*]] = xor i128 [[TMP3]], [[TMP4]]
-; X64_2LD-NEXT: [[TMP6:%.*]] = getelementptr i8, i8* [[X]], i8 16
-; X64_2LD-NEXT: [[TMP7:%.*]] = bitcast i8* [[TMP6]] to i128*
-; X64_2LD-NEXT: [[TMP8:%.*]] = getelementptr i8, i8* [[Y]], i8 16
-; X64_2LD-NEXT: [[TMP9:%.*]] = bitcast i8* [[TMP8]] to i128*
-; X64_2LD-NEXT: [[TMP10:%.*]] = load i128, i128* [[TMP7]]
-; X64_2LD-NEXT: [[TMP11:%.*]] = load i128, i128* [[TMP9]]
-; X64_2LD-NEXT: [[TMP12:%.*]] = xor i128 [[TMP10]], [[TMP11]]
-; X64_2LD-NEXT: [[TMP13:%.*]] = or i128 [[TMP5]], [[TMP12]]
-; X64_2LD-NEXT: [[TMP14:%.*]] = icmp ne i128 [[TMP13]], 0
-; X64_2LD-NEXT: [[TMP15:%.*]] = zext i1 [[TMP14]] to i32
-; X64_2LD-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP15]], 0
-; X64_2LD-NEXT: [[CONV:%.*]] = zext i1 [[CMP]] to i32
-; X64_2LD-NEXT: ret i32 [[CONV]]
-;
- %call = tail call i32 @memcmp(i8* %x, i8* %y, i64 32)
- %cmp = icmp eq i32 %call, 0
- %conv = zext i1 %cmp to i32
- ret i32 %conv
}
Removed: llvm/trunk/test/Transforms/ExpandMemCmp/X86/pr36421.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ExpandMemCmp/X86/pr36421.ll?rev=364415&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/ExpandMemCmp/X86/pr36421.ll (original)
+++ llvm/trunk/test/Transforms/ExpandMemCmp/X86/pr36421.ll (removed)
@@ -1,79 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -domtree -expandmemcmp -verify-dom-info -S | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-unknown-unknown"
-
-@.str = private unnamed_addr constant [7 x i8] c"abcdef\00", align 1
-@.str.1 = private unnamed_addr constant [7 x i8] c"ABCDEF\00", align 1
-
-define i32 @test(i8* nocapture readonly %string, i32 %len) local_unnamed_addr #0 {
-; CHECK-LABEL: @test(
-; CHECK-NEXT: entry:
-; CHECK-NEXT: [[COND:%.*]] = icmp eq i32 [[LEN:%.*]], 6
-; CHECK-NEXT: br i1 [[COND]], label [[SW_BB:%.*]], label [[RETURN:%.*]]
-; CHECK: sw.bb:
-; CHECK-NEXT: [[TMP0:%.*]] = bitcast i8* [[STRING:%.*]] to i32*
-; CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[TMP0]]
-; CHECK-NEXT: [[TMP2:%.*]] = xor i32 [[TMP1]], 1684234849
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, i8* [[STRING]], i8 4
-; CHECK-NEXT: [[TMP4:%.*]] = bitcast i8* [[TMP3]] to i16*
-; CHECK-NEXT: [[TMP5:%.*]] = load i16, i16* [[TMP4]]
-; CHECK-NEXT: [[TMP6:%.*]] = zext i16 [[TMP5]] to i32
-; CHECK-NEXT: [[TMP7:%.*]] = xor i32 [[TMP6]], 26213
-; CHECK-NEXT: [[TMP8:%.*]] = or i32 [[TMP2]], [[TMP7]]
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ne i32 [[TMP8]], 0
-; CHECK-NEXT: [[TMP10:%.*]] = zext i1 [[TMP9]] to i32
-; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[TMP10]], 0
-; CHECK-NEXT: br i1 [[CMP]], label [[RETURN]], label [[IF_END:%.*]]
-; CHECK: if.end:
-; CHECK-NEXT: [[TMP11:%.*]] = bitcast i8* [[STRING]] to i32*
-; CHECK-NEXT: [[TMP12:%.*]] = load i32, i32* [[TMP11]]
-; CHECK-NEXT: [[TMP13:%.*]] = xor i32 [[TMP12]], 1145258561
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr i8, i8* [[STRING]], i8 4
-; CHECK-NEXT: [[TMP15:%.*]] = bitcast i8* [[TMP14]] to i16*
-; CHECK-NEXT: [[TMP16:%.*]] = load i16, i16* [[TMP15]]
-; CHECK-NEXT: [[TMP17:%.*]] = zext i16 [[TMP16]] to i32
-; CHECK-NEXT: [[TMP18:%.*]] = xor i32 [[TMP17]], 17989
-; CHECK-NEXT: [[TMP19:%.*]] = or i32 [[TMP13]], [[TMP18]]
-; CHECK-NEXT: [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0
-; CHECK-NEXT: [[TMP21:%.*]] = zext i1 [[TMP20]] to i32
-; CHECK-NEXT: [[CMP2:%.*]] = icmp eq i32 [[TMP21]], 0
-; CHECK-NEXT: [[DOT:%.*]] = select i1 [[CMP2]], i32 64, i32 0
-; CHECK-NEXT: br label [[RETURN]]
-; CHECK: return:
-; CHECK-NEXT: [[RETVAL_0:%.*]] = phi i32 [ 61, [[SW_BB]] ], [ [[DOT]], [[IF_END]] ], [ 0, [[ENTRY:%.*]] ]
-; CHECK-NEXT: ret i32 [[RETVAL_0]]
-;
-entry:
- %cond = icmp eq i32 %len, 6
- br i1 %cond, label %sw.bb, label %return
-
-sw.bb: ; preds = %entry
- %call = tail call i32 @memcmp(i8* %string, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str, i64 0, i64 0), i64 6)
- %cmp = icmp eq i32 %call, 0
- br i1 %cmp, label %return, label %if.end
-
-if.end: ; preds = %sw.bb
- %call1 = tail call i32 @memcmp(i8* %string, i8* getelementptr inbounds ([7 x i8], [7 x i8]* @.str.1, i64 0, i64 0), i64 6)
- %cmp2 = icmp eq i32 %call1, 0
- %. = select i1 %cmp2, i32 64, i32 0
- br label %return
-
-return: ; preds = %entry, %if.end8, %if.end4, %if.end, %sw.bb
- %retval.0 = phi i32 [ 61, %sw.bb ], [ %., %if.end ], [ 0, %entry ]
- ret i32 %retval.0
-}
-
-; Function Attrs: nounwind readonly
-declare i32 @memcmp(i8* nocapture, i8* nocapture, i64) local_unnamed_addr #1
-
-attributes #0 = { nounwind readonly ssp uwtable "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-jump-tables"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-attributes #1 = { nounwind readonly "correctly-rounded-divide-sqrt-fp-math"="false" "disable-tail-calls"="false" "less-precise-fpmad"="false" "no-frame-pointer-elim"="false" "no-infs-fp-math"="false" "no-nans-fp-math"="false" "no-signed-zeros-fp-math"="false" "no-trapping-math"="false" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+fxsr,+mmx,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "unsafe-fp-math"="false" "use-soft-float"="false" }
-
-!llvm.module.flags = !{!0, !1}
-!llvm.ident = !{!2}
-
-!0 = !{i32 1, !"wchar_size", i32 4}
-!1 = !{i32 7, !"PIC Level", i32 2}
-!2 = !{!"clang version 7.0.0 (trunk 325350)"}
Modified: llvm/trunk/tools/opt/opt.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/tools/opt/opt.cpp?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/tools/opt/opt.cpp (original)
+++ llvm/trunk/tools/opt/opt.cpp Wed Jun 26 05:13:13 2019
@@ -514,6 +514,7 @@ int main(int argc, char **argv) {
initializeTarget(Registry);
// For codegen passes, only passes that do IR to IR transformation are
// supported.
+ initializeExpandMemCmpPassPass(Registry);
initializeScalarizeMaskedMemIntrinPass(Registry);
initializeCodeGenPreparePass(Registry);
initializeAtomicExpandPass(Registry);
Modified: llvm/trunk/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn (original)
+++ llvm/trunk/utils/gn/secondary/llvm/lib/CodeGen/BUILD.gn Wed Jun 26 05:13:13 2019
@@ -39,6 +39,7 @@ static_library("CodeGen") {
"EarlyIfConversion.cpp",
"EdgeBundles.cpp",
"ExecutionDomainFix.cpp",
+ "ExpandMemCmp.cpp",
"ExpandPostRAPseudos.cpp",
"ExpandReductions.cpp",
"FEntryInserter.cpp",
Modified: llvm/trunk/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn?rev=364416&r1=364415&r2=364416&view=diff
==============================================================================
--- llvm/trunk/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn (original)
+++ llvm/trunk/utils/gn/secondary/llvm/lib/Transforms/Scalar/BUILD.gn Wed Jun 26 05:13:13 2019
@@ -21,7 +21,6 @@ static_library("Scalar") {
"DeadStoreElimination.cpp",
"DivRemPairs.cpp",
"EarlyCSE.cpp",
- "ExpandMemCmp.cpp",
"FlattenCFGPass.cpp",
"Float2Int.cpp",
"GVN.cpp",