[llvm] a15ed70 - AMDGPU: Fix assert on constant load from addrspacecasted pointer

Matt Arsenault via llvm-commits <llvm-commits@lists.llvm.org>
Tue May 11 17:21:23 PDT 2021


Author: Matt Arsenault
Date: 2021-05-11T20:12:20-04:00
New Revision: a15ed701ab30d0073f46139df850fe23b03fd3ac

URL: https://github.com/llvm/llvm-project/commit/a15ed701ab30d0073f46139df850fe23b03fd3ac
DIFF: https://github.com/llvm/llvm-project/commit/a15ed701ab30d0073f46139df850fe23b03fd3ac.diff

LOG: AMDGPU: Fix assert on constant load from addrspacecasted pointer

The pass was trying to create a bitcast between pointers in different address spaces, which is invalid and triggered the assert.
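
Illustrative sketch (not part of the commit): the standalone snippet below shows, against an LLVM C++ API of roughly this vintage (typed pointers, Type::getInt8PtrTy still available), why IRBuilder::CreateBitCast cannot be used when the recovered base pointer sits in a different address space than the load, and what CreatePointerBitCastOrAddrSpaceCast produces instead. The variable names (GenericI8Ptr, ConstI8Ptr, Base, NewBase) are invented for the example.

#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

int main() {
  LLVMContext Ctx;
  IRBuilder<> IRB(Ctx);

  // Base pointer as recovered by constant-offset stripping: a generic
  // (addrspace(0)) i8* null, matching the test's
  // "addrspacecast (i8* null to i8 addrspace(4)*)" operand.
  PointerType *GenericI8Ptr = Type::getInt8PtrTy(Ctx, /*AddressSpace=*/0);
  Value *Base = ConstantPointerNull::get(GenericI8Ptr);

  // The load being widened lives in the constant address space (4), so the
  // pass needs an i8 addrspace(4)* here.
  PointerType *ConstI8Ptr = Type::getInt8PtrTy(Ctx, /*AddressSpace=*/4);

  // IRB.CreateBitCast(Base, ConstI8Ptr) would assert: a bitcast may not
  // change a pointer's address space. The helper below emits an
  // addrspacecast when the address spaces differ, and a plain bitcast (or
  // no cast at all) otherwise. Since Base is a constant, the result folds
  // to a constant expression and no insertion point is needed.
  Value *NewBase = IRB.CreatePointerBitCastOrAddrSpaceCast(Base, ConstI8Ptr);
  NewBase->print(errs());
  errs() << "\n";
  return 0;
}

The new amdgpu-late-codegenprepare.ll test below exercises this situation: the base behind the addrspacecast is an addrspace(0) or addrspace(1) pointer, while the widened i32 load stays in addrspace(4).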

Added: 
    llvm/test/CodeGen/AMDGPU/amdgpu-late-codegenprepare.ll

Modified: 
    llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
index e4aa6de10f2de..e57d971b6ef70 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULateCodeGenPrepare.cpp
@@ -165,7 +165,7 @@ bool AMDGPULateCodeGenPrepare::visitLoadInst(LoadInst &LI) {
   PointerType *Int32PtrTy = Type::getInt32PtrTy(LI.getContext(), AS);
   PointerType *Int8PtrTy = Type::getInt8PtrTy(LI.getContext(), AS);
   auto *NewPtr = IRB.CreateBitCast(
-      IRB.CreateConstGEP1_64(IRB.CreateBitCast(Base, Int8PtrTy),
+      IRB.CreateConstGEP1_64(IRB.CreatePointerBitCastOrAddrSpaceCast(Base, Int8PtrTy),
                              Offset - Adjust),
       Int32PtrTy);
   LoadInst *NewLd = IRB.CreateAlignedLoad(IRB.getInt32Ty(), NewPtr, Align(4));

diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-late-codegenprepare.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-late-codegenprepare.ll
new file mode 100644
index 0000000000000..51a4ea9288d25
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-late-codegenprepare.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-late-codegenprepare %s | FileCheck %s
+
+; Make sure we don't crash when trying to create a bitcast between
+; address spaces
+define amdgpu_kernel void @constant_from_offset_cast_generic_null() {
+; CHECK-LABEL: @constant_from_offset_cast_generic_null(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* bitcast (i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* addrspacecast (i8* null to i8 addrspace(4)*), i64 4) to i32 addrspace(4)*), align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT:    store i8 [[TMP3]], i8 addrspace(1)* undef, align 1
+; CHECK-NEXT:    ret void
+;
+  %load = load i8, i8 addrspace(4)* getelementptr inbounds (i8, i8 addrspace(4)* addrspacecast (i8* null to i8 addrspace(4)*), i64 6), align 1
+  store i8 %load, i8 addrspace(1)* undef
+  ret void
+}
+
+define amdgpu_kernel void @constant_from_offset_cast_global_null() {
+; CHECK-LABEL: @constant_from_offset_cast_global_null(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* bitcast (i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* null to i8 addrspace(4)*), i64 4) to i32 addrspace(4)*), align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT:    store i8 [[TMP3]], i8 addrspace(1)* undef, align 1
+; CHECK-NEXT:    ret void
+;
+  %load = load i8, i8 addrspace(4)* getelementptr inbounds (i8, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* null to i8 addrspace(4)*), i64 6), align 1
+  store i8 %load, i8 addrspace(1)* undef
+  ret void
+}
+
+@gv = unnamed_addr addrspace(1) global [64 x i8] undef, align 4
+
+define amdgpu_kernel void @constant_from_offset_cast_global_gv() {
+; CHECK-LABEL: @constant_from_offset_cast_global_gv(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* bitcast (i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* addrspacecast (i8 addrspace(1)* getelementptr inbounds ([64 x i8], [64 x i8] addrspace(1)* @gv, i32 0, i32 0) to i8 addrspace(4)*), i64 4) to i32 addrspace(4)*), align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT:    store i8 [[TMP3]], i8 addrspace(1)* undef, align 1
+; CHECK-NEXT:    ret void
+;
+  %load = load i8, i8 addrspace(4)* getelementptr inbounds (i8, i8 addrspace(4)* addrspacecast ([64 x i8] addrspace(1)* @gv to i8 addrspace(4)*), i64 6), align 1
+  store i8 %load, i8 addrspace(1)* undef
+  ret void
+}
+
+define amdgpu_kernel void @constant_from_offset_cast_generic_inttoptr() {
+; CHECK-LABEL: @constant_from_offset_cast_generic_inttoptr(
+; CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32 addrspace(4)* bitcast (i8 addrspace(4)* getelementptr (i8, i8 addrspace(4)* addrspacecast (i8* inttoptr (i64 128 to i8*) to i8 addrspace(4)*), i64 4) to i32 addrspace(4)*), align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
+; CHECK-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
+; CHECK-NEXT:    store i8 [[TMP3]], i8 addrspace(1)* undef, align 1
+; CHECK-NEXT:    ret void
+;
+  %load = load i8, i8 addrspace(4)* getelementptr inbounds (i8, i8 addrspace(4)* addrspacecast (i8* inttoptr (i64 128 to i8*) to i8 addrspace(4)*), i64 6), align 1
+  store i8 %load, i8 addrspace(1)* undef
+  ret void
+}
+
+define amdgpu_kernel void @constant_from_inttoptr() {
+; CHECK-LABEL: @constant_from_inttoptr(
+; CHECK-NEXT:    [[LOAD:%.*]] = load i8, i8 addrspace(4)* inttoptr (i64 128 to i8 addrspace(4)*), align 4
+; CHECK-NEXT:    store i8 [[LOAD]], i8 addrspace(1)* undef, align 1
+; CHECK-NEXT:    ret void
+;
+  %load = load i8, i8 addrspace(4)* inttoptr (i64 128 to i8 addrspace(4)*), align 1
+  store i8 %load, i8 addrspace(1)* undef
+  ret void
+}


        

