[llvm] r271935 - AMDGPU: Fix constantexpr addrspacecasts

Matt Arsenault via llvm-commits llvm-commits@lists.llvm.org
Mon Jun 6 13:03:31 PDT 2016


Author: arsenm
Date: Mon Jun  6 15:03:31 2016
New Revision: 271935

URL: http://llvm.org/viewvc/llvm-project?rev=271935&view=rev
Log:
AMDGPU: Fix constantexpr addrspacecasts

If a function contained a constantexpr group-to-flat address space cast,
the queue pointer wasn't enabled for it, resulting in a crash on noreg
later in instruction selection.
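
For example, a store through a constantexpr group-to-flat cast like the
one below (taken from the new addrspacecast-constantexpr.ll test) now
causes the function to be annotated with "amdgpu-queue-ptr" instead of
hitting the noreg crash:

  @lds.i32 = unnamed_addr addrspace(3) global i32 undef, align 4

  define void @store_constant_cast_group_gv_to_flat() #1 {
    store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds.i32 to i32 addrspace(4)*)
    ret void
  }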

Added:
    llvm/trunk/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
Modified:
    llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
    llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp?rev=271935&r1=271934&r2=271935&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUAnnotateKernelFeatures.cpp Mon Jun  6 15:03:31 2016
@@ -13,6 +13,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "AMDGPU.h"
+#include "llvm/IR/Constants.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/Module.h"
 
@@ -42,6 +43,11 @@ public:
     AU.setPreservesAll();
     ModulePass::getAnalysisUsage(AU);
   }
+
+  static bool visitConstantExpr(const ConstantExpr *CE);
+  static bool visitConstantExprsRecursively(
+    const Constant *EntryC,
+    SmallPtrSet<const Constant *, 8> &ConstantExprVisited);
 };
 
 }
@@ -53,21 +59,79 @@ char &llvm::AMDGPUAnnotateKernelFeatures
 INITIALIZE_PASS(AMDGPUAnnotateKernelFeatures, DEBUG_TYPE,
                 "Add AMDGPU function attributes", false, false)
 
-static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC) {
-  unsigned SrcAS = ASC->getSrcAddressSpace();
 
-  // The queue ptr is only needed when casting to flat, not from it.
+// The queue ptr is only needed when casting to flat, not from it.
+static bool castRequiresQueuePtr(unsigned SrcAS) {
   return SrcAS == AMDGPUAS::LOCAL_ADDRESS || SrcAS == AMDGPUAS::PRIVATE_ADDRESS;
 }
 
+static bool castRequiresQueuePtr(const AddrSpaceCastInst *ASC) {
+  return castRequiresQueuePtr(ASC->getSrcAddressSpace());
+}
+
+bool AMDGPUAnnotateKernelFeatures::visitConstantExpr(const ConstantExpr *CE) {
+  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
+    unsigned SrcAS = CE->getOperand(0)->getType()->getPointerAddressSpace();
+    return castRequiresQueuePtr(SrcAS);
+  }
+
+  return false;
+}
+
+bool AMDGPUAnnotateKernelFeatures::visitConstantExprsRecursively(
+  const Constant *EntryC,
+  SmallPtrSet<const Constant *, 8> &ConstantExprVisited) {
+
+  if (!ConstantExprVisited.insert(EntryC).second)
+    return false;
+
+  SmallVector<const Constant *, 16> Stack;
+  Stack.push_back(EntryC);
+
+  while (!Stack.empty()) {
+    const Constant *C = Stack.pop_back_val();
+
+    // Check this constant expression.
+    if (const auto *CE = dyn_cast<ConstantExpr>(C)) {
+      if (visitConstantExpr(CE))
+        return true;
+    }
+
+    // Visit all sub-expressions.
+    for (const Use &U : C->operands()) {
+      const auto *OpC = dyn_cast<Constant>(U);
+      if (!OpC)
+        continue;
+
+      if (!ConstantExprVisited.insert(OpC).second)
+        continue;
+
+      Stack.push_back(OpC);
+    }
+  }
+
+  return false;
+}
+
 // Return true if an addrspacecast is used that requires the queue ptr.
 bool AMDGPUAnnotateKernelFeatures::hasAddrSpaceCast(const Function &F) {
+  SmallPtrSet<const Constant *, 8> ConstantExprVisited;
+
   for (const BasicBlock &BB : F) {
     for (const Instruction &I : BB) {
       if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
         if (castRequiresQueuePtr(ASC))
           return true;
       }
+
+      for (const Use &U : I.operands()) {
+        const auto *OpC = dyn_cast<Constant>(U);
+        if (!OpC)
+          continue;
+
+        if (visitConstantExprsRecursively(OpC, ConstantExprVisited))
+          return true;
+      }
     }
   }
 

Modified: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp?rev=271935&r1=271934&r2=271935&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp Mon Jun  6 15:03:31 2016
@@ -1334,8 +1334,11 @@ SDValue SITargetLowering::getSegmentAper
   SDLoc SL;
   MachineFunction &MF = DAG.getMachineFunction();
   SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();
+  unsigned UserSGPR = Info->getQueuePtrUserSGPR();
+  assert(UserSGPR != AMDGPU::NoRegister);
+
   SDValue QueuePtr = CreateLiveInRegister(
-    DAG, &AMDGPU::SReg_64RegClass, Info->getQueuePtrUserSGPR(), MVT::i64);
+    DAG, &AMDGPU::SReg_64RegClass, UserSGPR, MVT::i64);
 
   // Offset into amd_queue_t for group_segment_aperture_base_hi /
   // private_segment_aperture_base_hi.

Added: llvm/trunk/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll?rev=271935&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll (added)
+++ llvm/trunk/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll Mon Jun  6 15:03:31 2016
@@ -0,0 +1,106 @@
+; RUN: opt -mtriple=amdgcn-unknown-amdhsa -S -amdgpu-annotate-kernel-features < %s | FileCheck -check-prefix=HSA %s
+
+declare void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* nocapture, i32 addrspace(4)* nocapture, i32, i32, i1) #0
+
+@lds.i32 = unnamed_addr addrspace(3) global i32 undef, align 4
+@lds.arr = unnamed_addr addrspace(3) global [256 x i32] undef, align 4
+
+@global.i32 = unnamed_addr addrspace(1) global i32 undef, align 4
+@global.arr = unnamed_addr addrspace(1) global [256 x i32] undef, align 4
+
+; HSA: @store_cast_0_flat_to_group_addrspacecast() #1
+define void @store_cast_0_flat_to_group_addrspacecast() #1 {
+  store i32 7, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)
+  ret void
+}
+
+; HSA: @store_cast_0_group_to_flat_addrspacecast() #2
+define void @store_cast_0_group_to_flat_addrspacecast() #1 {
+  store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* null to i32 addrspace(4)*)
+  ret void
+}
+
+; HSA: define void @store_constant_cast_group_gv_to_flat() #2
+define void @store_constant_cast_group_gv_to_flat() #1 {
+  store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(3)* @lds.i32 to i32 addrspace(4)*)
+  ret void
+}
+
+; HSA: @store_constant_cast_group_gv_gep_to_flat() #2
+define void @store_constant_cast_group_gv_gep_to_flat() #1 {
+  store i32 7, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
+  ret void
+}
+
+; HSA: @store_constant_cast_global_gv_to_flat() #1
+define void @store_constant_cast_global_gv_to_flat() #1 {
+  store i32 7, i32 addrspace(4)* addrspacecast (i32 addrspace(1)* @global.i32 to i32 addrspace(4)*)
+  ret void
+}
+
+; HSA: @store_constant_cast_global_gv_gep_to_flat() #1
+define void @store_constant_cast_global_gv_gep_to_flat() #1 {
+  store i32 7, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(1)* @global.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
+  ret void
+}
+
+; HSA: @load_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
+define void @load_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+  %val = load i32, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8)
+  store i32 %val, i32 addrspace(1)* %out
+  ret void
+}
+
+; HSA: @atomicrmw_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
+define void @atomicrmw_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+  %val = atomicrmw add i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 1 seq_cst
+  store i32 %val, i32 addrspace(1)* %out
+  ret void
+}
+
+; HSA: @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
+define void @cmpxchg_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+  %val = cmpxchg i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 0, i32 1 seq_cst seq_cst
+  %val0 = extractvalue { i32, i1 } %val, 0
+  store i32 %val0, i32 addrspace(1)* %out
+  ret void
+}
+
+; HSA: @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #2
+define void @memcpy_constant_cast_group_gv_gep_to_flat(i32 addrspace(1)* %out) #1 {
+  call void @llvm.memcpy.p1i32.p4i32.i32(i32 addrspace(1)* %out, i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 32, i32 4, i1 false)
+  ret void
+}
+
+; Can't just search the pointer value
+; HSA: @store_value_constant_cast_lds_gv_gep_to_flat(i32 addrspace(4)* addrspace(1)* %out) #2
+define void @store_value_constant_cast_lds_gv_gep_to_flat(i32 addrspace(4)* addrspace(1)* %out) #1 {
+  store i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8), i32 addrspace(4)* addrspace(1)* %out
+  ret void
+}
+
+; Can't just search pointer types
+; HSA: @store_ptrtoint_value_constant_cast_lds_gv_gep_to_flat(i64 addrspace(1)* %out) #2
+define void @store_ptrtoint_value_constant_cast_lds_gv_gep_to_flat(i64 addrspace(1)* %out) #1 {
+  store i64 ptrtoint (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i64), i64 addrspace(1)* %out
+  ret void
+}
+
+; Cast group to flat, do GEP, cast back to group
+; HSA: @store_constant_cast_group_gv_gep_to_flat_to_group() #2
+define void @store_constant_cast_group_gv_gep_to_flat_to_group() #1 {
+  store i32 7, i32 addrspace(3)* addrspacecast (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i32 addrspace(3)*)
+  ret void
+}
+
+; HSA: @ret_constant_cast_group_gv_gep_to_flat_to_group() #2
+define i32 addrspace(3)* @ret_constant_cast_group_gv_gep_to_flat_to_group() #1 {
+  ret i32 addrspace(3)* addrspacecast (i32 addrspace(4)* getelementptr ([256 x i32], [256 x i32] addrspace(4)* addrspacecast ([256 x i32] addrspace(3)* @lds.arr to [256 x i32] addrspace(4)*), i64 0, i64 8) to i32 addrspace(3)*)
+}
+
+; HSA: attributes #0 = { argmemonly nounwind }
+; HSA: attributes #1 = { nounwind }
+; HSA: attributes #2 = { nounwind "amdgpu-queue-ptr" }
+
+attributes #0 = { argmemonly nounwind }
+attributes #1 = { nounwind }



