[llvm] r302943 - AMDGPU/SI: Don't promote to vector if the load/store is volatile.
Changpeng Fang via llvm-commits
llvm-commits at lists.llvm.org
Fri May 12 13:31:12 PDT 2017
Author: chfang
Date: Fri May 12 15:31:12 2017
New Revision: 302943
URL: http://llvm.org/viewvc/llvm-project?rev=302943&view=rev
Log:
AMDGPU/SI: Don't promote to vector if the load/store is volatile.
Summary:
Volatile loads and stores must not be rewritten when promoting an alloca to a vector, so skip vector promotion for allocas with volatile accesses.
Reviewers:
arsenm
Differential Revision:
http://reviews.llvm.org/D33107
Modified:
llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp?rev=302943&r1=302942&r2=302943&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp Fri May 12 15:31:12 2017
@@ -397,14 +397,17 @@ static Value* GEPToVectorIndex(GetElemen
// instructions.
static bool canVectorizeInst(Instruction *Inst, User *User) {
switch (Inst->getOpcode()) {
- case Instruction::Load:
+ case Instruction::Load: {
+ LoadInst *LI = cast<LoadInst>(Inst);
+ return !LI->isVolatile();
+ }
case Instruction::BitCast:
case Instruction::AddrSpaceCast:
return true;
case Instruction::Store: {
// Must be the stored pointer operand, not a stored value.
StoreInst *SI = cast<StoreInst>(Inst);
- return SI->getPointerOperand() == User;
+ return (SI->getPointerOperand() == User) && !SI->isVolatile();
}
default:
return false;
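
For context when reading the thread, the predicate after this change reads roughly as follows. This is reconstructed from the hunk above; code outside the hunk, including the trailing braces, is assumed:

static bool canVectorizeInst(Instruction *Inst, User *User) {
  switch (Inst->getOpcode()) {
  case Instruction::Load: {
    // A volatile load must remain an individual memory access, so it
    // disqualifies the alloca from vector promotion.
    LoadInst *LI = cast<LoadInst>(Inst);
    return !LI->isVolatile();
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
    return true;
  case Instruction::Store: {
    // Must be the stored pointer operand, not a stored value, and the
    // store itself must not be volatile.
    StoreInst *SI = cast<StoreInst>(Inst);
    return (SI->getPointerOperand() == User) && !SI->isVolatile();
  }
  default:
    return false;
  }
}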
Modified: llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-volatile.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-volatile.ll?rev=302943&r1=302942&r2=302943&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-volatile.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-volatile.ll Fri May 12 15:31:12 2017
@@ -1,26 +1,26 @@
; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -amdgpu-promote-alloca < %s | FileCheck %s
; CHECK-LABEL: @volatile_load(
-; CHECK: alloca [5 x i32]
+; CHECK: alloca [4 x i32]
; CHECK: load volatile i32, i32*
define amdgpu_kernel void @volatile_load(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
- %stack = alloca [5 x i32], align 4
+ %stack = alloca [4 x i32], align 4
%tmp = load i32, i32 addrspace(1)* %in, align 4
- %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %tmp
+ %arrayidx1 = getelementptr inbounds [4 x i32], [4 x i32]* %stack, i32 0, i32 %tmp
%load = load volatile i32, i32* %arrayidx1
store i32 %load, i32 addrspace(1)* %out
ret void
}
; CHECK-LABEL: @volatile_store(
-; CHECK: alloca [5 x i32]
+; CHECK: alloca [4 x i32]
; CHECK: store volatile i32 %tmp, i32*
define amdgpu_kernel void @volatile_store(i32 addrspace(1)* nocapture %out, i32 addrspace(1)* nocapture %in) {
entry:
- %stack = alloca [5 x i32], align 4
+ %stack = alloca [4 x i32], align 4
%tmp = load i32, i32 addrspace(1)* %in, align 4
- %arrayidx1 = getelementptr inbounds [5 x i32], [5 x i32]* %stack, i32 0, i32 %tmp
+ %arrayidx1 = getelementptr inbounds [4 x i32], [4 x i32]* %stack, i32 0, i32 %tmp
store volatile i32 %tmp, i32* %arrayidx1
ret void
}