[llvm-branch-commits] [llvm-branch] r235338 - Merging r228188:
Tom Stellard
thomas.stellard at amd.com
Mon Apr 20 13:04:57 PDT 2015
Author: tstellar
Date: Mon Apr 20 15:04:57 2015
New Revision: 235338
URL: http://llvm.org/viewvc/llvm-project?rev=235338&view=rev
Log:
Merging r228188:
------------------------------------------------------------------------
r228188 | thomas.stellard | 2015-02-04 15:49:49 -0500 (Wed, 04 Feb 2015) | 5 lines
R600: Don't promote i64 stores to v2i32 during DAG legalization
We take care of this during instruction selection now. This
fixes a potential infinite loop when lowering misaligned stores.
------------------------------------------------------------------------
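For context, a minimal IR sketch of the kind of store this change affects, modeled on the test updated below (hypothetical standalone example, not part of the commit; the exact llc invocation follows the test's RUN line, which this diff does not show):

  define void @store_i64_align1(i64 %v, i64 addrspace(3)* %p) {
    ; An under-aligned i64 store: with this change it reaches instruction
    ; selection still as an i64 store and is only then rewritten to v2i32,
    ; instead of being promoted during DAG legalization.
    store i64 %v, i64 addrspace(3)* %p, align 1
    ret void
  }

Per the updated CHECK lines below, on SI this is now expected to emit two ds_write_b32 instructions rather than one ds_write2_b32.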
Modified:
llvm/branches/release_36/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
llvm/branches/release_36/lib/Target/R600/AMDGPUISelLowering.cpp
llvm/branches/release_36/test/CodeGen/R600/unaligned-load-store.ll
Modified: llvm/branches/release_36/lib/Target/R600/AMDGPUISelDAGToDAG.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_36/lib/Target/R600/AMDGPUISelDAGToDAG.cpp?rev=235338&r1=235337&r2=235338&view=diff
==============================================================================
--- llvm/branches/release_36/lib/Target/R600/AMDGPUISelDAGToDAG.cpp (original)
+++ llvm/branches/release_36/lib/Target/R600/AMDGPUISelDAGToDAG.cpp Mon Apr 20 15:04:57 2015
@@ -439,6 +439,31 @@ SDNode *AMDGPUDAGToDAGISel::Select(SDNod
break;
}
+  case ISD::STORE: {
+    // Handle i64 stores here for the same reason mentioned above for loads.
+    StoreSDNode *ST = cast<StoreSDNode>(N);
+    SDValue Value = ST->getValue();
+    if (Value.getValueType() != MVT::i64 || ST->isTruncatingStore())
+      break;
+
+    SDValue NewValue = CurDAG->getNode(ISD::BITCAST, SDLoc(N),
+                                       MVT::v2i32, Value);
+    SDValue NewStore = CurDAG->getStore(ST->getChain(), SDLoc(N), NewValue,
+                                        ST->getBasePtr(), ST->getMemOperand());
+
+    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), NewStore);
+
+    if (NewValue.getOpcode() == ISD::BITCAST) {
+      Select(NewStore.getNode());
+      return SelectCode(NewValue.getNode());
+    }
+
+    // getNode() may fold the bitcast if its input was another bitcast. If that
+    // happens we should only select the new store.
+    N = NewStore.getNode();
+    break;
+  }
+
case AMDGPUISD::REGISTER_LOAD: {
if (ST.getGeneration() <= AMDGPUSubtarget::NORTHERN_ISLANDS)
break;
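As a hedged illustration (not from the patch) of the corner case the trailing comment in the new code describes: if the stored i64 value is itself a bitcast from v2i32, getNode() can fold the v2i32 bitcast built during selection, NewValue is then no longer an ISD::BITCAST node, and only the new store is left to select. A hypothetical IR example of that shape:

  define void @store_i64_from_v2i32(<2 x i32> %v, i64 addrspace(3)* %p) {
    ; The stored i64 is itself a bitcast from v2i32, so the v2i32 bitcast
    ; created in Select() can fold away back to the original v2i32 value.
    %b = bitcast <2 x i32> %v to i64
    store i64 %b, i64 addrspace(3)* %p, align 8
    ret void
  }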
Modified: llvm/branches/release_36/lib/Target/R600/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_36/lib/Target/R600/AMDGPUISelLowering.cpp?rev=235338&r1=235337&r2=235338&view=diff
==============================================================================
--- llvm/branches/release_36/lib/Target/R600/AMDGPUISelLowering.cpp (original)
+++ llvm/branches/release_36/lib/Target/R600/AMDGPUISelLowering.cpp Mon Apr 20 15:04:57 2015
@@ -141,9 +141,6 @@ AMDGPUTargetLowering::AMDGPUTargetLoweri
setOperationAction(ISD::STORE, MVT::v2f32, Promote);
AddPromotedToType(ISD::STORE, MVT::v2f32, MVT::v2i32);
- setOperationAction(ISD::STORE, MVT::i64, Promote);
- AddPromotedToType(ISD::STORE, MVT::i64, MVT::v2i32);
-
setOperationAction(ISD::STORE, MVT::v4f32, Promote);
AddPromotedToType(ISD::STORE, MVT::v4f32, MVT::v4i32);
Modified: llvm/branches/release_36/test/CodeGen/R600/unaligned-load-store.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_36/test/CodeGen/R600/unaligned-load-store.ll?rev=235338&r1=235337&r2=235338&view=diff
==============================================================================
--- llvm/branches/release_36/test/CodeGen/R600/unaligned-load-store.ll (original)
+++ llvm/branches/release_36/test/CodeGen/R600/unaligned-load-store.ll Mon Apr 20 15:04:57 2015
@@ -35,7 +35,8 @@ define void @unaligned_load_store_i32_gl
; SI: ds_read_u8
; SI: ds_read_u8
; SI: ds_read_u8
-; SI: ds_write2_b32
+; SI: ds_write_b32
+; SI: ds_write_b32
; SI: s_endpgm
define void @unaligned_load_store_i64_local(i64 addrspace(3)* %p, i64 addrspace(3)* %r) {
%v = load i64 addrspace(3)* %p, align 1
@@ -52,7 +53,8 @@ define void @unaligned_load_store_i64_lo
; SI: buffer_load_ubyte
; SI: buffer_load_ubyte
; SI: buffer_load_ubyte
-; SI: buffer_store_dwordx2
+; SI: buffer_store_dword
+; SI: buffer_store_dword
define void @unaligned_load_store_i64_global(i64 addrspace(1)* %p, i64 addrspace(1)* %r) {
%v = load i64 addrspace(1)* %p, align 1
store i64 %v, i64 addrspace(1)* %r, align 1