[llvm] r242413 - Correct lowering of memmove in NVPTX
Larisse Voufo
lvoufo at google.com
Thu Jul 16 12:46:40 PDT 2015
On Thu, Jul 16, 2015 at 10:39 AM, Eli Bendersky <eliben at google.com> wrote:
>
>
> On Thu, Jul 16, 2015 at 10:36 AM, Benjamin Kramer <benny.kra at gmail.com>
> wrote:
>
>>
>> > On 16.07.2015, at 19:01, Eli Bendersky <eliben at google.com> wrote:
>> >
>> > Larisse, could you please point to a bot you see failing? [or more details about the platform]
>>
>> This was fixed in r242417.
>>
>
> Thanks Benjamin, you're quick :) I was just testing my fix patch when I
> saw this.
>
> Eli
>
Sorry, I got side-tracked. It does look fixed on my end now.
-- Larisse.
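
(For anyone else verifying locally: a single lit test can usually be re-run in
isolation from the build directory with something along the lines of

    ./bin/llvm-lit -v test/CodeGen/NVPTX/lower-aggr-copies.ll

-- the exact paths depend on your checkout and build layout.)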
>
>
>>
>> >
>> > On Thu, Jul 16, 2015 at 9:59 AM, Eli Bendersky <eliben at google.com> wrote:
>> > Thanks Larisse, I'm looking into it right now
>> >
>> > On Thu, Jul 16, 2015 at 9:57 AM, Larisse Voufo <lvoufo at google.com> wrote:
>> >
>> >
>> > On Thu, Jul 16, 2015 at 9:27 AM, Eli Bendersky <eliben at google.com> wrote:
>> > Author: eliben
>> > Date: Thu Jul 16 11:27:19 2015
>> > New Revision: 242413
>> >
>> > URL: http://llvm.org/viewvc/llvm-project?rev=242413&view=rev
>> > Log:
>> > Correct lowering of memmove in NVPTX
>> >
>> > This fixes https://llvm.org/bugs/show_bug.cgi?id=24056
>> >
>> > Also a bit of refactoring along the way.
>> >
>> > Differential Revision: http://reviews.llvm.org/D11220
>> >
>> > Modified:
>> > llvm/trunk/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
>> > llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp
>> >
>> >
>> > llvm/trunk/test/CodeGen/NVPTX/lower-aggr-copies.ll
>> >
>> > FYI, it looks like this test is failing on some targets.
>> > Running LLVM regression tests on llvm/trunk at 242414 resulted in:
>> >
>> > ********************
>> > Testing: 0 .. 10.. 20.. 30.. 40.. 50.. 60.. 70.. 80.. 90..
>> > Testing Time: 86.35s
>> > ********************
>> > Failing Tests (1):
>> > LLVM :: CodeGen/NVPTX/lower-aggr-copies.ll
>> >
>> > Expected Passes : 14028
>> > Expected Failures : 104
>> > Unsupported Tests : 64
>> > Unexpected Failures: 1
>> > ninja: build stopped: subcommand failed.
>> >
>> >
>> >
>> >
>> > Modified: llvm/trunk/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp
>> > URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp?rev=242413&r1=242412&r2=242413&view=diff
>> >
>> > ==============================================================================
>> > --- llvm/trunk/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp (original)
>> > +++ llvm/trunk/lib/Target/NVPTX/NVPTXLowerAggrCopies.cpp Thu Jul 16 11:27:19 2015
>> > @@ -6,6 +6,7 @@
>> > // License. See LICENSE.TXT for details.
>> > //
>> > //===----------------------------------------------------------------------===//
>> > +//
>> > // Lower aggregate copies, memset, memcpy, memmov intrinsics into loops when
>> > // the size is large or is not a compile-time constant.
>> > //
>> > @@ -25,12 +26,14 @@
>> > #include "llvm/IR/LLVMContext.h"
>> > #include "llvm/IR/Module.h"
>> > #include "llvm/Support/Debug.h"
>> > +#include "llvm/Transforms/Utils/BasicBlockUtils.h"
>> >
>> > #define DEBUG_TYPE "nvptx"
>> >
>> > using namespace llvm;
>> >
>> > namespace {
>> > +
>> > // actual analysis class, which is a functionpass
>> > struct NVPTXLowerAggrCopies : public FunctionPass {
>> > static char ID;
>> > @@ -50,14 +53,13 @@ struct NVPTXLowerAggrCopies : public Fun
>> > return "Lower aggregate copies/intrinsics into loops";
>> > }
>> > };
>> > -} // namespace
>> >
>> > char NVPTXLowerAggrCopies::ID = 0;
>> >
>> > -// Lower MemTransferInst or load-store pair to loop
>> > -static void convertTransferToLoop(
>> > - Instruction *splitAt, Value *srcAddr, Value *dstAddr, Value *len,
>> > - bool srcVolatile, bool dstVolatile, LLVMContext &Context, Function &F) {
>> > +// Lower memcpy to loop.
>> > +void convertMemCpyToLoop(Instruction *splitAt, Value *srcAddr, Value *dstAddr,
>> > + Value *len, bool srcVolatile, bool dstVolatile,
>> > + LLVMContext &Context, Function &F) {
>> > Type *indType = len->getType();
>> >
>> > BasicBlock *origBB = splitAt->getParent();
>> > @@ -98,10 +100,105 @@ static void convertTransferToLoop(
>> > loop.CreateCondBr(loop.CreateICmpULT(newind, len), loopBB, newBB);
>> > }
>> >
>> > -// Lower MemSetInst to loop
>> > -static void convertMemSetToLoop(Instruction *splitAt, Value *dstAddr,
>> > - Value *len, Value *val, LLVMContext &Context,
>> > - Function &F) {
>> > +// Lower memmove to IR. memmove is required to correctly copy overlapping memory
>> > +// regions; therefore, it has to check the relative positions of the source and
>> > +// destination pointers and choose the copy direction accordingly.
>> > +//
>> > +// The code below is an IR rendition of this C function:
>> > +//
>> > +// void* memmove(void* dst, const void* src, size_t n) {
>> > +// unsigned char* d = dst;
>> > +// const unsigned char* s = src;
>> > +// if (s < d) {
>> > +// // copy backwards
>> > +// while (n--) {
>> > +// d[n] = s[n];
>> > +// }
>> > +// } else {
>> > +// // copy forward
>> > +// for (size_t i = 0; i < n; ++i) {
>> > +// d[i] = s[i];
>> > +// }
>> > +// }
>> > +// return dst;
>> > +// }
>> > +void convertMemMoveToLoop(Instruction *splitAt, Value *srcAddr, Value *dstAddr,
>> > + Value *len, bool srcVolatile, bool dstVolatile,
>> > + LLVMContext &Context, Function &F) {
>> > + Type *TypeOfLen = len->getType();
>> > + BasicBlock *OrigBB = splitAt->getParent();
>> > +
>> > + // Create a comparison of src and dst, based on which we jump to either
>> > + // the forward-copy part of the function (if src >= dst) or the backwards-copy
>> > + // part (if src < dst).
>> > + // SplitBlockAndInsertIfThenElse conveniently creates the basic if-then-else
>> > + // structure. Its block terminators (unconditional branches) are replaced by
>> > + // the appropriate conditional branches when the loop is built.
>> > + ICmpInst *PtrCompare = new ICmpInst(splitAt, ICmpInst::ICMP_ULT, srcAddr,
>> > + dstAddr, "compare_src_dst");
>> > + TerminatorInst *ThenTerm, *ElseTerm;
>> > + SplitBlockAndInsertIfThenElse(PtrCompare, splitAt, &ThenTerm, &ElseTerm);
>> > +
>> > + // Each part of the function consists of two blocks:
>> > + // copy_backwards: used to skip the loop when n == 0
>> > + // copy_backwards_loop: the actual backwards loop BB
>> > + // copy_forward: used to skip the loop when n == 0
>> > + // copy_forward_loop: the actual forward loop BB
>> > + BasicBlock *CopyBackwardsBB = ThenTerm->getParent();
>> > + CopyBackwardsBB->setName("copy_backwards");
>> > + BasicBlock *CopyForwardBB = ElseTerm->getParent();
>> > + CopyForwardBB->setName("copy_forward");
>> > + BasicBlock *ExitBB = splitAt->getParent();
>> > + ExitBB->setName("memmove_done");
>> > +
>> > + // Initial comparison of n == 0 that lets us skip the loops altogether. Shared
>> > + // between both backwards and forward copy clauses.
>> > + ICmpInst *CompareN =
>> > + new ICmpInst(OrigBB->getTerminator(), ICmpInst::ICMP_EQ, len,
>> > + ConstantInt::get(TypeOfLen, 0), "compare_n_to_0");
>> > +
>> > + // Copying backwards.
>> > + BasicBlock *LoopBB =
>> > + BasicBlock::Create(Context, "copy_backwards_loop", &F,
>> CopyForwardBB);
>> > + IRBuilder<> LoopBuilder(LoopBB);
>> > + PHINode *LoopPhi = LoopBuilder.CreatePHI(TypeOfLen, 0);
>> > + Value *IndexPtr = LoopBuilder.CreateSub(
>> > + LoopPhi, ConstantInt::get(TypeOfLen, 1), "index_ptr");
>> > + Value *Element = LoopBuilder.CreateLoad(
>> > + LoopBuilder.CreateInBoundsGEP(srcAddr, IndexPtr), "element");
>> > + LoopBuilder.CreateStore(Element,
>> > + LoopBuilder.CreateInBoundsGEP(dstAddr, IndexPtr));
>> > + LoopBuilder.CreateCondBr(
>> > + LoopBuilder.CreateICmpEQ(IndexPtr, ConstantInt::get(TypeOfLen, 0)),
>> > + ExitBB, LoopBB);
>> > + LoopPhi->addIncoming(IndexPtr, LoopBB);
>> > + LoopPhi->addIncoming(len, CopyBackwardsBB);
>> > + BranchInst::Create(ExitBB, LoopBB, CompareN, ThenTerm);
>> > + ThenTerm->removeFromParent();
>> > +
>> > + // Copying forward.
>> > + BasicBlock *FwdLoopBB =
>> > + BasicBlock::Create(Context, "copy_forward_loop", &F, ExitBB);
>> > + IRBuilder<> FwdLoopBuilder(FwdLoopBB);
>> > + PHINode *FwdCopyPhi = FwdLoopBuilder.CreatePHI(TypeOfLen, 0, "index_ptr");
>> > + Value *FwdElement = FwdLoopBuilder.CreateLoad(
>> > + FwdLoopBuilder.CreateInBoundsGEP(srcAddr, FwdCopyPhi), "element");
>> > + FwdLoopBuilder.CreateStore(
>> > + FwdElement, FwdLoopBuilder.CreateInBoundsGEP(dstAddr, FwdCopyPhi));
>> > + Value *FwdIndexPtr = FwdLoopBuilder.CreateAdd(
>> > + FwdCopyPhi, ConstantInt::get(TypeOfLen, 1), "index_increment");
>> > + FwdLoopBuilder.CreateCondBr(FwdLoopBuilder.CreateICmpEQ(FwdIndexPtr, len),
>> > + ExitBB, FwdLoopBB);
>> > + FwdCopyPhi->addIncoming(FwdIndexPtr, FwdLoopBB);
>> > + FwdCopyPhi->addIncoming(ConstantInt::get(TypeOfLen, 0), CopyForwardBB);
>> > +
>> > + BranchInst::Create(ExitBB, FwdLoopBB, CompareN, ElseTerm);
>> > + ElseTerm->removeFromParent();
>> > +}
>> > +
>> > +// Lower memset to loop.
>> > +void convertMemSetToLoop(Instruction *splitAt, Value *dstAddr, Value *len,
>> > + Value *val, LLVMContext &Context, Function &F) {
>> > BasicBlock *origBB = splitAt->getParent();
>> > BasicBlock *newBB = splitAt->getParent()->splitBasicBlock(splitAt, "split");
>> > BasicBlock *loopBB = BasicBlock::Create(Context, "loadstoreloop", &F, newBB);
>> > @@ -129,15 +226,12 @@ static void convertMemSetToLoop(Instruct
>> >
>> > bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
>> > SmallVector<LoadInst *, 4> aggrLoads;
>> > - SmallVector<MemTransferInst *, 4> aggrMemcpys;
>> > - SmallVector<MemSetInst *, 4> aggrMemsets;
>> > + SmallVector<MemIntrinsic *, 4> MemCalls;
>> >
>> > const DataLayout &DL = F.getParent()->getDataLayout();
>> > LLVMContext &Context = F.getParent()->getContext();
>> >
>> > - //
>> > - // Collect all the aggrLoads, aggrMemcpys and addrMemsets.
>> > - //
>> > + // Collect all aggregate loads and mem* calls.
>> > for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
>> > for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
>> > ++II) {
>> > @@ -154,34 +248,23 @@ bool NVPTXLowerAggrCopies::runOnFunction
>> > continue;
>> > aggrLoads.push_back(load);
>> > }
>> > - } else if (MemTransferInst *intr = dyn_cast<MemTransferInst>(II)) {
>> > - Value *len = intr->getLength();
>> > - // If the number of elements being copied is greater
>> > - // than MaxAggrCopySize, lower it to a loop
>> > - if (ConstantInt *len_int = dyn_cast<ConstantInt>(len)) {
>> > - if (len_int->getZExtValue() >= MaxAggrCopySize) {
>> > - aggrMemcpys.push_back(intr);
>> > - }
>> > - } else {
>> > - // turn variable length memcpy/memmov into loop
>> > - aggrMemcpys.push_back(intr);
>> > - }
>> > - } else if (MemSetInst *memsetintr = dyn_cast<MemSetInst>(II)) {
>> > - Value *len = memsetintr->getLength();
>> > - if (ConstantInt *len_int = dyn_cast<ConstantInt>(len)) {
>> > - if (len_int->getZExtValue() >= MaxAggrCopySize) {
>> > - aggrMemsets.push_back(memsetintr);
>> > + } else if (MemIntrinsic *IntrCall = dyn_cast<MemIntrinsic>(II)) {
>> > + // Convert intrinsic calls with variable size or with constant size
>> > + // larger than the MaxAggrCopySize threshold.
>> > + if (ConstantInt *LenCI = dyn_cast<ConstantInt>(IntrCall->getLength())) {
>> > + if (LenCI->getZExtValue() >= MaxAggrCopySize) {
>> > + MemCalls.push_back(IntrCall);
>> > }
>> > } else {
>> > - // turn variable length memset into loop
>> > - aggrMemsets.push_back(memsetintr);
>> > + MemCalls.push_back(IntrCall);
>> > }
>> > }
>> > }
>> > }
>> > - if ((aggrLoads.size() == 0) && (aggrMemcpys.size() == 0) &&
>> > - (aggrMemsets.size() == 0))
>> > +
>> > + if (aggrLoads.size() == 0 && MemCalls.size() == 0) {
>> > return false;
>> > + }
>> >
>> > //
>> > // Do the transformation of an aggr load/copy/set to a loop
>> > @@ -193,36 +276,58 @@ bool NVPTXLowerAggrCopies::runOnFunction
>> > unsigned numLoads = DL.getTypeStoreSize(load->getType());
>> > Value *len = ConstantInt::get(Type::getInt32Ty(Context), numLoads);
>> >
>> > - convertTransferToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
>> > - store->isVolatile(), Context, F);
>> > + convertMemCpyToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
>> > + store->isVolatile(), Context, F);
>> >
>> > store->eraseFromParent();
>> > load->eraseFromParent();
>> > }
>> >
>> > - for (MemTransferInst *cpy : aggrMemcpys) {
>> > - convertTransferToLoop(/* splitAt */ cpy,
>> > - /* srcAddr */ cpy->getSource(),
>> > - /* dstAddr */ cpy->getDest(),
>> > - /* len */ cpy->getLength(),
>> > - /* srcVolatile */ cpy->isVolatile(),
>> > - /* dstVolatile */ cpy->isVolatile(),
>> > + // Transform mem* intrinsic calls.
>> > + for (MemIntrinsic *MemCall : MemCalls) {
>> > + if (MemCpyInst *Memcpy = dyn_cast<MemCpyInst>(MemCall)) {
>> > + convertMemCpyToLoop(/* splitAt */ Memcpy,
>> > + /* srcAddr */ Memcpy->getRawSource(),
>> > + /* dstAddr */ Memcpy->getRawDest(),
>> > + /* len */ Memcpy->getLength(),
>> > + /* srcVolatile */ Memcpy->isVolatile(),
>> > + /* dstVolatile */ Memcpy->isVolatile(),
>> > /* Context */ Context,
>> > /* Function F */ F);
>> > - cpy->eraseFromParent();
>> > - }
>> > -
>> > - for (MemSetInst *memsetinst : aggrMemsets) {
>> > - Value *len = memsetinst->getLength();
>> > - Value *val = memsetinst->getValue();
>> > - convertMemSetToLoop(memsetinst, memsetinst->getDest(), len, val, Context,
>> > - F);
>> > - memsetinst->eraseFromParent();
>> > + } else if (MemMoveInst *Memmove = dyn_cast<MemMoveInst>(MemCall)) {
>> > + convertMemMoveToLoop(/* splitAt */ Memmove,
>> > + /* srcAddr */ Memmove->getRawSource(),
>> > + /* dstAddr */ Memmove->getRawDest(),
>> > + /* len */ Memmove->getLength(),
>> > + /* srcVolatile */ Memmove->isVolatile(),
>> > + /* dstVolatile */ Memmove->isVolatile(),
>> > + /* Context */ Context,
>> > + /* Function F */ F);
>> > +
>> > + } else if (MemSetInst *Memset = dyn_cast<MemSetInst>(MemCall)) {
>> > + convertMemSetToLoop(/* splitAt */ Memset,
>> > + /* dstAddr */ Memset->getRawDest(),
>> > + /* len */ Memset->getLength(),
>> > + /* val */ Memset->getValue(),
>> > + /* Context */ Context,
>> > + /* F */ F);
>> > + }
>> > + MemCall->eraseFromParent();
>> > }
>> >
>> > return true;
>> > }
>> >
>> > +} // namespace
>> > +
>> > +namespace llvm {
>> > +void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
>> > +}
>> > +
>> > +INITIALIZE_PASS(NVPTXLowerAggrCopies, "nvptx-lower-aggr-copies",
>> > + "Lower aggregate copies, and llvm.mem* intrinsics into
>> loops",
>> > + false, false)
>> > +
>> > FunctionPass *llvm::createLowerAggrCopies() {
>> > return new NVPTXLowerAggrCopies();
>> > }
>> >
>> > Modified: llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp
>> > URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp?rev=242413&r1=242412&r2=242413&view=diff
>> >
>> > ==============================================================================
>> > --- llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp (original)
>> > +++ llvm/trunk/lib/Target/NVPTX/NVPTXTargetMachine.cpp Thu Jul 16 11:27:19 2015
>> > @@ -53,6 +53,7 @@ void initializeGenericToNVVMPass(PassReg
>> > void initializeNVPTXAllocaHoistingPass(PassRegistry &);
>> > void initializeNVPTXAssignValidGlobalNamesPass(PassRegistry&);
>> > void initializeNVPTXFavorNonGenericAddrSpacesPass(PassRegistry &);
>> > +void initializeNVPTXLowerAggrCopiesPass(PassRegistry &);
>> > void initializeNVPTXLowerKernelArgsPass(PassRegistry &);
>> > void initializeNVPTXLowerAllocaPass(PassRegistry &);
>> > }
>> > @@ -64,14 +65,15 @@ extern "C" void LLVMInitializeNVPTXTarge
>> >
>> > // FIXME: This pass is really intended to be invoked during IR optimization,
>> > // but it's very NVPTX-specific.
>> > - initializeNVVMReflectPass(*PassRegistry::getPassRegistry());
>> > - initializeGenericToNVVMPass(*PassRegistry::getPassRegistry());
>> > - initializeNVPTXAllocaHoistingPass(*PassRegistry::getPassRegistry());
>> > - initializeNVPTXAssignValidGlobalNamesPass(*PassRegistry::getPassRegistry());
>> > - initializeNVPTXFavorNonGenericAddrSpacesPass(
>> > - *PassRegistry::getPassRegistry());
>> > - initializeNVPTXLowerKernelArgsPass(*PassRegistry::getPassRegistry());
>> > - initializeNVPTXLowerAllocaPass(*PassRegistry::getPassRegistry());
>> > + PassRegistry &PR = *PassRegistry::getPassRegistry();
>> > + initializeNVVMReflectPass(PR);
>> > + initializeGenericToNVVMPass(PR);
>> > + initializeNVPTXAllocaHoistingPass(PR);
>> > + initializeNVPTXAssignValidGlobalNamesPass(PR);
>> > + initializeNVPTXFavorNonGenericAddrSpacesPass(PR);
>> > + initializeNVPTXLowerKernelArgsPass(PR);
>> > + initializeNVPTXLowerAllocaPass(PR);
>> > + initializeNVPTXLowerAggrCopiesPass(PR);
>> > }
>> >
>> > static std::string computeDataLayout(bool is64Bit) {
>> >
>> > Modified: llvm/trunk/test/CodeGen/NVPTX/lower-aggr-copies.ll
>> > URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/NVPTX/lower-aggr-copies.ll?rev=242413&r1=242412&r2=242413&view=diff
>> >
>> > ==============================================================================
>> > --- llvm/trunk/test/CodeGen/NVPTX/lower-aggr-copies.ll (original)
>> > +++ llvm/trunk/test/CodeGen/NVPTX/lower-aggr-copies.ll Thu Jul 16 11:27:19 2015
>> > @@ -1,35 +1,68 @@
>> > -; RUN: llc < %s -march=nvptx -mcpu=sm_35 | FileCheck %s
>> > +; RUN: llc < %s -march=nvptx64 -mcpu=sm_35 | FileCheck %s --check-prefix PTX
>> > +; RUN: opt < %s -S -nvptx-lower-aggr-copies | FileCheck %s --check-prefix IR
>> >
>> > ; Verify that the NVPTXLowerAggrCopies pass works as expected - calls to
>> > ; llvm.mem* intrinsics get lowered to loops.
>> >
>> > +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
>> > +target triple = "nvptx64-unknown-unknown"
>> > +
>> > declare void @llvm.memcpy.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
>> > +declare void @llvm.memmove.p0i8.p0i8.i64(i8* nocapture, i8* nocapture readonly, i64, i32, i1) #1
>> > declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i32, i1) #1
>> >
>> > define i8* @memcpy_caller(i8* %dst, i8* %src, i64 %n) #0 {
>> > entry:
>> > tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false)
>> > ret i8* %dst
>> > -; CHECK-LABEL: .visible .func (.param .b32 func_retval0) memcpy_caller
>> > -; CHECK: LBB[[LABEL:[_0-9]+]]:
>> > -; CHECK: ld.u8 %rs[[REG:[0-9]+]]
>> > -; CHECK: st.u8 [%r{{[0-9]+}}], %rs[[REG]]
>> > -; CHECK: add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
>> > -; CHECK-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
>> > -; CHECK-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
>> > +
>> > +; IR-LABEL: @memcpy_caller
>> > +; IR: loadstoreloop:
>> > +; IR: [[LOADPTR:%[0-9]+]] = getelementptr i8, i8* %src, i64
>> > +; IR-NEXT: [[VAL:%[0-9]+]] = load i8, i8* [[LOADPTR]]
>> > +; IR-NEXT: [[STOREPTR:%[0-9]+]] = getelementptr i8, i8* %dst, i64
>> > +; IR-NEXT: store i8 [[VAL]], i8* [[STOREPTR]]
>> > +
>> > +; PTX-LABEL: .visible .func (.param .b64 func_retval0) memcpy_caller
>> > +; PTX: LBB[[LABEL:[_0-9]+]]:
>> > +; PTX: ld.u8 %rs[[REG:[0-9]+]]
>> > +; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
>> > +; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
>> > +; PTX-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
>> > +; PTX-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
>> > }
>> >
>> > define i8* @memcpy_volatile_caller(i8* %dst, i8* %src, i64 %n) #0 {
>> > entry:
>> > tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 true)
>> > ret i8* %dst
>> > -; CHECK-LABEL: .visible .func (.param .b32 func_retval0) memcpy_volatile_caller
>> > -; CHECK: LBB[[LABEL:[_0-9]+]]:
>> > -; CHECK: ld.volatile.u8 %rs[[REG:[0-9]+]]
>> > -; CHECK: st.volatile.u8 [%r{{[0-9]+}}], %rs[[REG]]
>> > -; CHECK: add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
>> > -; CHECK-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
>> > -; CHECK-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
>> > +
>> > +; IR-LABEL: @memcpy_volatile_caller
>> > +; IR: load volatile
>> > +; IR: store volatile
>> > +
>> > +; PTX-LABEL: .visible .func (.param .b64 func_retval0) memcpy_volatile_caller
>> > +; PTX: LBB[[LABEL:[_0-9]+]]:
>> > +; PTX: ld.volatile.u8 %rs[[REG:[0-9]+]]
>> > +; PTX: st.volatile.u8 [%rd{{[0-9]+}}], %rs[[REG]]
>> > +; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
>> > +; PTX-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
>> > +; PTX-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
>> > +}
>> > +
>> > +define i8* @memcpy_casting_caller(i32* %dst, i32* %src, i64 %n) #0 {
>> > +entry:
>> > + %0 = bitcast i32* %dst to i8*
>> > + %1 = bitcast i32* %src to i8*
>> > + tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %0, i8* %1, i64 %n, i32 1, i1 false)
>> > + ret i8* %0
>> > +
>> > +; Check that casts in calls to memcpy are handled properly
>> > +; IR-LABEL: @memcpy_casting_caller
>> > +; IR: [[DSTCAST:%[0-9]+]] = bitcast i32* %dst to i8*
>> > +; IR: [[SRCCAST:%[0-9]+]] = bitcast i32* %src to i8*
>> > +; IR: getelementptr i8, i8* [[SRCCAST]]
>> > +; IR: getelementptr i8, i8* [[DSTCAST]]
>> > }
>> >
>> > define i8* @memset_caller(i8* %dst, i32 %c, i64 %n) #0 {
>> > @@ -37,11 +70,52 @@ entry:
>> > %0 = trunc i32 %c to i8
>> > tail call void @llvm.memset.p0i8.i64(i8* %dst, i8 %0, i64 %n, i32 1, i1 false)
>> > ret i8* %dst
>> > -; CHECK-LABEL: .visible .func (.param .b32 func_retval0) memset_caller(
>> > -; CHECK: ld.param.u8 %rs[[REG:[0-9]+]]
>> > -; CHECK: LBB[[LABEL:[_0-9]+]]:
>> > -; CHECK: st.u8 [%r{{[0-9]+}}], %rs[[REG]]
>> > -; CHECK: add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
>> > -; CHECK-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
>> > -; CHECK-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
>> > +
>> > +; IR-LABEL: @memset_caller
>> > +; IR: [[VAL:%[0-9]+]] = trunc i32 %c to i8
>> > +; IR: loadstoreloop:
>> > +; IR: [[STOREPTR:%[0-9]+]] = getelementptr i8, i8* %dst, i64
>> > +; IR-NEXT: store i8 [[VAL]], i8* [[STOREPTR]]
>> > +
>> > +; PTX-LABEL: .visible .func (.param .b64 func_retval0) memset_caller(
>> > +; PTX: ld.param.u8 %rs[[REG:[0-9]+]]
>> > +; PTX: LBB[[LABEL:[_0-9]+]]:
>> > +; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[REG]]
>> > +; PTX: add.s64 %rd[[COUNTER:[0-9]+]], %rd[[COUNTER]], 1
>> > +; PTX-NEXT: setp.lt.u64 %p[[PRED:[0-9]+]], %rd[[COUNTER]], %rd
>> > +; PTX-NEXT: @%p[[PRED]] bra LBB[[LABEL]]
>> > +}
>> > +
>> > +define i8* @memmove_caller(i8* %dst, i8* %src, i64 %n) #0 {
>> > +entry:
>> > + tail call void @llvm.memmove.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %n, i32 1, i1 false)
>> > + ret i8* %dst
>> > +
>> > +; IR-LABEL: @memmove_caller
>> > +; IR: icmp ult i8* %src, %dst
>> > +; IR: [[PHIVAL:%[0-9a-zA-Z_]+]] = phi i64
>> > +; IR-NEXT: %index_ptr = sub i64 [[PHIVAL]], 1
>> > +; IR: [[FWDPHIVAL:%[0-9a-zA-Z_]+]] = phi i64
>> > +; IR: {{%[0-9a-zA-Z_]+}} = add i64 [[FWDPHIVAL]], 1
>> > +
>> > +; PTX-LABEL: .visible .func (.param .b64 func_retval0) memmove_caller(
>> > +; PTX: ld.param.u64 %rd[[N:[0-9]+]]
>> > +; PTX: setp.eq.s64 %p[[NEQ0:[0-9]+]], %rd[[N]], 0
>> > +; PTX: setp.ge.u64 %p[[SRC_GT_THAN_DST:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
>> > +; PTX-NEXT: @%p[[SRC_GT_THAN_DST]] bra LBB[[FORWARD_BB:[0-9_]+]]
>> > +; -- this is the backwards copying BB
>> > +; PTX: @%p[[NEQ0]] bra LBB[[EXIT:[0-9_]+]]
>> > +; PTX: add.s64 %rd[[N]], %rd[[N]], -1
>> > +; PTX: ld.u8 %rs[[ELEMENT:[0-9]+]]
>> > +; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT]]
>> > +; -- this is the forwards copying BB
>> > +; PTX: LBB[[FORWARD_BB]]:
>> > +; PTX: @%p[[NEQ0]] bra LBB[[EXIT]]
>> > +; PTX: ld.u8 %rs[[ELEMENT2:[0-9]+]]
>> > +; PTX: st.u8 [%rd{{[0-9]+}}], %rs[[ELEMENT2]]
>> > +; PTX: add.s64 %rd[[INDEX:[0-9]+]], %rd[[INDEX]], 1
>> > +; -- exit block
>> > +; PTX: LBB[[EXIT]]:
>> > +; PTX-NEXT: st.param.b64 [func_retval0
>> > +; PTX-NEXT: ret
>> > }
>> >
>> >
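
P.S. on the bug class this patch addresses: the old convertTransferToLoop
lowered memmove with the same forward copy loop as memcpy, which loses data
when the regions overlap and dst > src. A minimal standalone C illustration
(not from the patch) of why the copy direction matters:

    #include <stdio.h>
    #include <string.h>

    /* Forward byte-by-byte copy -- the shape of the old lowering. */
    static void forward_copy(char *d, const char *s, size_t n) {
      for (size_t i = 0; i < n; ++i)
        d[i] = s[i]; /* clobbers source bytes not yet copied when d > s */
    }

    int main(void) {
      char a[8] = "abcdef";
      char b[8] = "abcdef";
      forward_copy(a + 1, a, 5); /* overlapping shift right, dst > src */
      memmove(b + 1, b, 5);      /* memmove must copy backwards here */
      printf("%s\n", a);         /* "aaaaaa" -- corrupted */
      printf("%s\n", b);         /* "aabcde" -- correct */
      return 0;
    }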