[PATCH] D154609: [X86] Preserve volatile ATOMIC_LOAD_OR nodes

Nabeel Omer via Phabricator via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 6 07:19:24 PDT 2023


n-omer updated this revision to Diff 537710.

CHANGES SINCE LAST ACTION
  https://reviews.llvm.org/D154609/new/

https://reviews.llvm.org/D154609

Files:
  llvm/lib/Target/X86/X86ISelLowering.cpp
  llvm/test/CodeGen/X86/pr63692.ll


Index: llvm/test/CodeGen/X86/pr63692.ll
===================================================================
--- /dev/null
+++ llvm/test/CodeGen/X86/pr63692.ll
@@ -0,0 +1,31 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mtriple=x86_64-- | FileCheck %s
+
+define void @prefault(ptr noundef %range_start, ptr noundef readnone %range_end) {
+; CHECK-LABEL: prefault:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    cmpq %rsi, %rdi
+; CHECK-NEXT:    jae .LBB0_3
+; CHECK-NEXT:    .p2align 4, 0x90
+; CHECK-NEXT:  .LBB0_1: # %while.body
+; CHECK-NEXT:    # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT:    lock orb $0, (%rdi)
+; CHECK-NEXT:    addq $4096, %rdi # imm = 0x1000
+; CHECK-NEXT:    cmpq %rsi, %rdi
+; CHECK-NEXT:    jb .LBB0_1
+; CHECK-NEXT:  .LBB0_3: # %while.end
+; CHECK-NEXT:    retq
+entry:
+  %cmp3 = icmp ult ptr %range_start, %range_end
+  br i1 %cmp3, label %while.body, label %while.end
+
+while.body:                                       ; preds = %entry, %while.body
+  %start.04 = phi ptr [ %add.ptr, %while.body ], [ %range_start, %entry ]
+  %0 = atomicrmw volatile or ptr %start.04, i8 0 monotonic, align 1
+  %add.ptr = getelementptr inbounds i8, ptr %start.04, i64 4096
+  %cmp = icmp ult ptr %add.ptr, %range_end
+  br i1 %cmp, label %while.body, label %while.end
+
+while.end:                                        ; preds = %while.body, %entry
+  ret void
+}
Index: llvm/lib/Target/X86/X86ISelLowering.cpp
===================================================================
--- llvm/lib/Target/X86/X86ISelLowering.cpp
+++ llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -33550,7 +33550,9 @@
   // changing, all we need is a lowering for the *ordering* impacts of the
   // atomicrmw.  As such, we can chose a different operation and memory
   // location to minimize impact on other code.
-  if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS)) {
+  // The above holds unless the node is marked volatile, in which
+  // case it needs to be preserved according to the LangRef.
+  if (Opc == ISD::ATOMIC_LOAD_OR && isNullConstant(RHS) && !AN->isVolatile()) {
     // On X86, the only ordering which actually requires an instruction is
     // seq_cst which isn't SingleThread, everything just needs to be preserved
     // during codegen and then dropped. Note that we expect (but don't assume),


-------------- next part --------------
A non-text attachment was scrubbed...
Name: D154609.537710.patch
Type: text/x-patch
Size: 2402 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20230706/7c670e92/attachment.bin>


More information about the llvm-commits mailing list