[llvm] a912579 - [MemoryLocation] Support missing atomic intrinsics in getForArg.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Sat Dec 4 14:18:55 PST 2021


Author: Florian Hahn
Date: 2021-12-04T22:18:39Z
New Revision: a9125792b3be19c40cf1ffbe17b0bd3cc0920fcc

URL: https://github.com/llvm/llvm-project/commit/a9125792b3be19c40cf1ffbe17b0bd3cc0920fcc
DIFF: https://github.com/llvm/llvm-project/commit/a9125792b3be19c40cf1ffbe17b0bd3cc0920fcc.diff

LOG: [MemoryLocation] Support missing atomic intrinsics in getForArg.

getForArgument is missing support for the atomic memory transfer
intrinsics. In terms of the locations they access, they behave like
regular memory transfer intrinsics, and getForSource/getForDest
already support them as such.
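
As a rough caller-side sketch of the effect (not part of the patch; the
helper name is made up, and the call instruction with a constant i64
length of 4 plus the TargetLibraryInfo are assumed to be set up
elsewhere):

    #include "llvm/Analysis/MemoryLocation.h"
    #include "llvm/Analysis/TargetLibraryInfo.h"

    using namespace llvm;

    // Query the location accessed through the destination argument
    // (index 0) of an element-wise atomic memcpy/memmove/memset call.
    MemoryLocation getAtomicIntrinsicDest(const CallBase *Call,
                                          const TargetLibraryInfo *TLI) {
      // With this patch, a constant length operand (e.g. i64 4) is used
      // to bound the location, so the result carries
      // LocationSize::precise(4) instead of an unknown size. That in
      // turn lets BasicAA report NoModRef for accesses starting past
      // the fourth byte, which is what the updated test checks below.
      return MemoryLocation::getForArgument(Call, /*ArgIdx=*/0, TLI);
    }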

Added: 
    

Modified: 
    llvm/lib/Analysis/MemoryLocation.cpp
    llvm/test/Analysis/BasicAA/atomic-memory-intrinsics.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/MemoryLocation.cpp b/llvm/lib/Analysis/MemoryLocation.cpp
index 44136a7c36e0e..3d068c460710d 100644
--- a/llvm/lib/Analysis/MemoryLocation.cpp
+++ b/llvm/lib/Analysis/MemoryLocation.cpp
@@ -177,6 +177,9 @@ MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
     case Intrinsic::memcpy:
     case Intrinsic::memcpy_inline:
     case Intrinsic::memmove:
+    case Intrinsic::memcpy_element_unordered_atomic:
+    case Intrinsic::memmove_element_unordered_atomic:
+    case Intrinsic::memset_element_unordered_atomic:
       assert((ArgIdx == 0 || ArgIdx == 1) &&
              "Invalid argument index for memory intrinsic");
       if (ConstantInt *LenCI = dyn_cast<ConstantInt>(II->getArgOperand(2)))
@@ -236,6 +239,10 @@ MemoryLocation MemoryLocation::getForArgument(const CallBase *Call,
                                 II->getArgOperand(1)->getType())),
                             AATags);
     }
+
+    assert(
+        !isa<AnyMemTransferInst>(II) &&
+        "all memory transfer intrinsics should be handled by the switch above");
   }
 
   // We can bound the aliasing properties of memset_pattern16 just as we can

diff --git a/llvm/test/Analysis/BasicAA/atomic-memory-intrinsics.ll b/llvm/test/Analysis/BasicAA/atomic-memory-intrinsics.ll
index 3d46566ecd131..5728a6e6f8f33 100644
--- a/llvm/test/Analysis/BasicAA/atomic-memory-intrinsics.ll
+++ b/llvm/test/Analysis/BasicAA/atomic-memory-intrinsics.ll
@@ -6,7 +6,7 @@ define void @test_memset_element_unordered_atomic_const_size(i8* noalias %a) {
 ; CHECK-LABEL: Function: test_memset_element_unordered_atomic_const_size
 ; CHECK:       Just Mod (MustAlias):  Ptr: i8* %a	<->  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
 ; CHECK-NEXT:  Just Mod:  Ptr: i8* %a.gep.1	<->  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
-; CHECK-NEXT:  Just Mod:  Ptr: i8* %a.gep.5	<->  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
+; CHECK-NEXT:  NoModRef:  Ptr: i8* %a.gep.5	<->  call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
 ;
 entry:
   call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
@@ -39,9 +39,9 @@ define void @test_memcpy_element_unordered_atomic_const_size(i8* noalias %a, i8*
 ; CHECK:       Just Ref:  Ptr: i8* %a	<->  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
 ; CHECK-NEXT:  Just Mod:  Ptr: i8* %b	<->  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
 ; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.1	<->  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
-; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.5	<->  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
+; CHECK-NEXT:  NoModRef:  Ptr: i8* %a.gep.5	<->  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
 ; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.1	<->  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
-; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.5	<->  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
+; CHECK-NEXT:  NoModRef:  Ptr: i8* %b.gep.5	<->  call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
 ;
 entry:
   %a.gep.1 = getelementptr i8, i8* %a, i32 1
@@ -85,9 +85,9 @@ define void @test_memmove_element_unordered_atomic_const_size(i8* noalias %a, i8
 ; CHECK:       Just Ref:  Ptr: i8* %a	<->  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
 ; CHECK-NEXT:  Just Mod:  Ptr: i8* %b	<->  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
 ; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.1	<->  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
-; CHECK-NEXT:  Just Ref:  Ptr: i8* %a.gep.5	<->  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
+; CHECK-NEXT:  NoModRef:  Ptr: i8* %a.gep.5	<->  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
 ; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.1	<->  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
-; CHECK-NEXT:  Just Mod:  Ptr: i8* %b.gep.5	<->  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
+; CHECK-NEXT:  NoModRef:  Ptr: i8* %b.gep.5	<->  call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
 ;
 entry:
   %a.gep.1 = getelementptr i8, i8* %a, i32 1


        

