[llvm] r363022 - AtomicExpand: Don't crash on non-0 alloca

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 10 18:35:07 PDT 2019


Author: arsenm
Date: Mon Jun 10 18:35:07 2019
New Revision: 363022

URL: http://llvm.org/viewvc/llvm-project?rev=363022&view=rev
Log:
AtomicExpand: Don't crash on non-0 alloca

This now produces garbage on AMDGPU with a call to a nonexistent,
anonymous libcall but won't assert.

Added:
    llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
Modified:
    llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp

Modified: llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp?rev=363022&r1=363021&r2=363022&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp (original)
+++ llvm/trunk/lib/CodeGen/AtomicExpandPass.cpp Mon Jun 10 18:35:07 2019
@@ -1712,8 +1712,11 @@ bool AtomicExpand::expandAtomicOpToLibca
   if (CASExpected) {
     AllocaCASExpected = AllocaBuilder.CreateAlloca(CASExpected->getType());
     AllocaCASExpected->setAlignment(AllocaAlignment);
+    unsigned AllocaAS =  AllocaCASExpected->getType()->getPointerAddressSpace();
+
     AllocaCASExpected_i8 =
-        Builder.CreateBitCast(AllocaCASExpected, Type::getInt8PtrTy(Ctx));
+      Builder.CreateBitCast(AllocaCASExpected,
+                            Type::getInt8PtrTy(Ctx, AllocaAS));
     Builder.CreateLifetimeStart(AllocaCASExpected_i8, SizeVal64);
     Builder.CreateAlignedStore(CASExpected, AllocaCASExpected, AllocaAlignment);
     Args.push_back(AllocaCASExpected_i8);
@@ -1740,8 +1743,9 @@ bool AtomicExpand::expandAtomicOpToLibca
   if (!CASExpected && HasResult && !UseSizedLibcall) {
     AllocaResult = AllocaBuilder.CreateAlloca(I->getType());
     AllocaResult->setAlignment(AllocaAlignment);
+    unsigned AllocaAS =  AllocaResult->getType()->getPointerAddressSpace();
     AllocaResult_i8 =
-        Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx));
+      Builder.CreateBitCast(AllocaResult, Type::getInt8PtrTy(Ctx, AllocaAS));
     Builder.CreateLifetimeStart(AllocaResult_i8, SizeVal64);
     Args.push_back(AllocaResult_i8);
   }

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp?rev=363022&r1=363021&r2=363022&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelLowering.cpp Mon Jun 10 18:35:07 2019
@@ -524,6 +524,7 @@ AMDGPUTargetLowering::AMDGPUTargetLoweri
   setHasMultipleConditionRegisters(true);
 
   setMinCmpXchgSizeInBits(32);
+  setSupportsUnalignedAtomics(false);
 
   PredictableSelectIsExpensive = false;
 

Added: llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll?rev=363022&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll (added)
+++ llvm/trunk/test/Transforms/AtomicExpand/AMDGPU/unaligned-atomic.ll Mon Jun 10 18:35:07 2019
@@ -0,0 +1,37 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -atomic-expand %s | FileCheck -check-prefix=GCN %s
+
+; FIXME: This should not introduce a libcall, much less one to an
+; anonymous function.
+
+define i32 @atomic_load_global_align1(i32 addrspace(1)* %ptr) {
+; GCN-LABEL: @atomic_load_global_align1(
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[PTR:%.*]] to i8 addrspace(1)*
+; GCN-NEXT:    [[TMP2:%.*]] = addrspacecast i8 addrspace(1)* [[TMP1]] to i8*
+; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast i32* [[TMP3]] to i8*
+; GCN-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP4]])
+; GCN-NEXT:    call void @0(i64 4, i8* [[TMP2]], i8* [[TMP4]], i32 5)
+; GCN-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP3]], align 4
+; GCN-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP4]])
+; GCN-NEXT:    ret i32 [[TMP5]]
+;
+  %val = load atomic i32, i32 addrspace(1)* %ptr  seq_cst, align 1
+  ret i32 %val
+}
+
+define void @atomic_store_global_align1(i32 addrspace(1)* %ptr, i32 %val) {
+; GCN-LABEL: @atomic_store_global_align1(
+; GCN-NEXT:    [[TMP1:%.*]] = bitcast i32 addrspace(1)* [[PTR:%.*]] to i8 addrspace(1)*
+; GCN-NEXT:    [[TMP2:%.*]] = addrspacecast i8 addrspace(1)* [[TMP1]] to i8*
+; GCN-NEXT:    [[TMP3:%.*]] = alloca i32, align 4
+; GCN-NEXT:    [[TMP4:%.*]] = bitcast i32* [[TMP3]] to i8*
+; GCN-NEXT:    call void @llvm.lifetime.start.p0i8(i64 4, i8* [[TMP4]])
+; GCN-NEXT:    store i32 [[VAL:%.*]], i32* [[TMP3]], align 4
+; GCN-NEXT:    call void @1(i64 4, i8* [[TMP2]], i8* [[TMP4]], i32 0)
+; GCN-NEXT:    call void @llvm.lifetime.end.p0i8(i64 4, i8* [[TMP4]])
+; GCN-NEXT:    ret void
+;
+  store atomic i32 %val, i32 addrspace(1)* %ptr monotonic, align 1
+  ret void
+}




More information about the llvm-commits mailing list