[llvm] fda0c8d - AMDGPU: Lower addrspacecast to 32-bit constant

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri May 8 07:46:10 PDT 2020


Author: Matt Arsenault
Date: 2020-05-08T10:46:00-04:00
New Revision: fda0c8df289db56c4f009a157e9c5bfa51c06044

URL: https://github.com/llvm/llvm-project/commit/fda0c8df289db56c4f009a157e9c5bfa51c06044
DIFF: https://github.com/llvm/llvm-project/commit/fda0c8df289db56c4f009a157e9c5bfa51c06044.diff

LOG: AMDGPU: Lower addrspacecast to 32-bit constant

Somehow this was missing from the DAG path, but not global isel.

Added: 
    

Modified: 
    llvm/lib/Target/AMDGPU/SIISelLowering.cpp
    llvm/test/CodeGen/AMDGPU/addrspacecast.ll
    llvm/test/CodeGen/AMDGPU/invalid-addrspacecast.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 467c59c90691..5193efa355a0 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -4941,6 +4941,10 @@ SDValue SITargetLowering::lowerADDRSPACECAST(SDValue Op,
     }
   }
 
+  if (ASC->getDestAddressSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT &&
+      Src.getValueType() == MVT::i64)
+    return DAG.getNode(ISD::TRUNCATE, SL, MVT::i32, Src);
+
   // global <-> flat are no-ops and never emitted.
 
   const MachineFunction &MF = DAG.getMachineFunction();

diff  --git a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
index 5e39e6700401..46d02d30df5f 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast.ll
@@ -106,6 +106,17 @@ define amdgpu_kernel void @use_constant_to_flat_addrspacecast(i32 addrspace(4)*
   ret void
 }
 
+; HSA-LABEL: {{^}}use_constant_to_global_addrspacecast:
+; HSA: s_load_dwordx2 s{{\[}}[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]{{\]}}
+; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
+; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
+; HSA: {{flat|global}}_load_dword v{{[0-9]+}}, v{{\[}}[[VPTRLO]]:[[VPTRHI]]{{\]}}
+define amdgpu_kernel void @use_constant_to_global_addrspacecast(i32 addrspace(4)* %ptr) #0 {
+  %stof = addrspacecast i32 addrspace(4)* %ptr to i32 addrspace(1)*
+  %ld = load volatile i32, i32 addrspace(1)* %stof
+  ret void
+}
+
 ; HSA-LABEL: {{^}}use_flat_to_group_addrspacecast:
 ; HSA: enable_sgpr_private_segment_buffer = 1
 ; HSA: enable_sgpr_dispatch_ptr = 0
@@ -290,6 +301,38 @@ define amdgpu_kernel void @store_flat_scratch(i32 addrspace(1)* noalias %out, i3
   ret void
 }
 
+; HSA-LABEL: {{^}}use_constant_to_constant32_addrspacecast
+; GFX9: s_load_dwordx2 [[PTRPTR:s\[[0-9]+:[0-9]+\]]], s[4:5], 0x0{{$}}
+; GFX9: s_load_dword [[OFFSET:s[0-9]+]], s[4:5], 0x8{{$}}
+; GFX9: s_load_dwordx2 s{{\[}}[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]{{\]}}, [[PTRPTR]], 0x0{{$}}
+; GFX9: s_mov_b32 s[[PTR_HI]], 0{{$}}
+; GFX9: s_add_i32 s[[PTR_LO]], s[[PTR_LO]], [[OFFSET]]
+; GFX9: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x0{{$}}
+define amdgpu_kernel void @use_constant_to_constant32_addrspacecast(i8 addrspace(4)* addrspace(4)* %ptr.ptr, i32 %offset) #0 {
+  %ptr = load volatile i8 addrspace(4)*, i8 addrspace(4)* addrspace(4)* %ptr.ptr
+  %addrspacecast = addrspacecast i8 addrspace(4)* %ptr to i8 addrspace(6)*
+  %gep = getelementptr i8, i8 addrspace(6)* %addrspacecast, i32 %offset
+  %ptr.cast = bitcast i8 addrspace(6)* %gep to i32 addrspace(6)*
+  %load = load volatile i32, i32 addrspace(6)* %ptr.cast, align 4
+  ret void
+}
+
+; HSA-LABEL: {{^}}use_global_to_constant32_addrspacecast
+; GFX9: s_load_dwordx2 [[PTRPTR:s\[[0-9]+:[0-9]+\]]], s[4:5], 0x0{{$}}
+; GFX9: s_load_dword [[OFFSET:s[0-9]+]], s[4:5], 0x8{{$}}
+; GFX9: s_load_dwordx2 s{{\[}}[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]{{\]}}, [[PTRPTR]], 0x0{{$}}
+; GFX9: s_mov_b32 s[[PTR_HI]], 0{{$}}
+; GFX9: s_add_i32 s[[PTR_LO]], s[[PTR_LO]], [[OFFSET]]
+; GFX9: s_load_dword s{{[0-9]+}}, s{{\[}}[[PTR_LO]]:[[PTR_HI]]{{\]}}, 0x0{{$}}
+define amdgpu_kernel void @use_global_to_constant32_addrspacecast(i8 addrspace(1)* addrspace(4)* %ptr.ptr, i32 %offset) #0 {
+  %ptr = load volatile i8 addrspace(1)*, i8 addrspace(1)* addrspace(4)* %ptr.ptr
+  %addrspacecast = addrspacecast i8 addrspace(1)* %ptr to i8 addrspace(6)*
+  %gep = getelementptr i8, i8 addrspace(6)* %addrspacecast, i32 %offset
+  %ptr.cast = bitcast i8 addrspace(6)* %gep to i32 addrspace(6)*
+  %load = load volatile i32, i32 addrspace(6)* %ptr.cast, align 4
+  ret void
+}
+
 declare void @llvm.amdgcn.s.barrier() #1
 declare i32 @llvm.amdgcn.workitem.id.x() #2
 

diff  --git a/llvm/test/CodeGen/AMDGPU/invalid-addrspacecast.ll b/llvm/test/CodeGen/AMDGPU/invalid-addrspacecast.ll
index 2f8e022ed026..cec4eac48154 100644
--- a/llvm/test/CodeGen/AMDGPU/invalid-addrspacecast.ll
+++ b/llvm/test/CodeGen/AMDGPU/invalid-addrspacecast.ll
@@ -13,3 +13,10 @@ define amdgpu_kernel void @use_constant32bit_to_flat_addrspacecast(i32 addrspace
   store volatile i32 7, i32* %stof
   ret void
 }
+
+; ERROR: error: <unknown>:0:0: in function use_local_to_constant32bit_addrspacecast void (i32 addrspace(3)*): invalid addrspacecast
+define amdgpu_kernel void @use_local_to_constant32bit_addrspacecast(i32 addrspace(3)* %ptr) #0 {
+  %stof = addrspacecast i32 addrspace(3)* %ptr to i32 addrspace(6)*
+  %load = load volatile i32, i32 addrspace(6)* %stof
+  ret void
+}


        


More information about the llvm-commits mailing list