[llvm] [AtomicExpandPass] Match isIdempotentRMW with InstCombineRMW (PR #142277)

via llvm-commits llvm-commits at lists.llvm.org
Thu Jun 5 13:46:49 PDT 2025


https://github.com/AZero13 updated https://github.com/llvm/llvm-project/pull/142277

>From 0f73eeef6f76a5703347e3eecaa13c342d0cd963 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sat, 31 May 2025 10:55:07 -0400
Subject: [PATCH 1/4] Pre-commit tests (NFC)

---
 llvm/test/CodeGen/X86/atomic-idempotent.ll    | 332 ++++++++++++++++++
 llvm/test/Transforms/InstCombine/atomicrmw.ll |  20 ++
 2 files changed, 352 insertions(+)

diff --git a/llvm/test/CodeGen/X86/atomic-idempotent.ll b/llvm/test/CodeGen/X86/atomic-idempotent.ll
index 020f9eb793102..1a5dd86ddaedb 100644
--- a/llvm/test/CodeGen/X86/atomic-idempotent.ll
+++ b/llvm/test/CodeGen/X86/atomic-idempotent.ll
@@ -622,4 +622,336 @@ define void @or8_nouse_seq_cst(ptr %p) #0 {
   ret void
 }
 
+define void @atomic_umin_uint_max(ptr %addr) {
+; CHECK-LABEL: @atomic_umin_uint_max(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i32 0 monotonic, align 4
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+; X64-LABEL: atomic_umin_uint_max:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    .p2align 4
+; X64-NEXT:  .LBB15_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    lock cmpxchgl %eax, (%rdi)
+; X64-NEXT:    jne .LBB15_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+;
+; X86-LABEL: atomic_umin_uint_max:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    .p2align 4
+; X86-NEXT:  .LBB15_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    lock cmpxchgl %eax, (%ecx)
+; X86-NEXT:    jne .LBB15_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    retl
+  atomicrmw umin ptr %addr, i32 -1 seq_cst
+  ret void
+}
+
+define void @atomic_umax_zero(ptr %addr) {
+; CHECK-LABEL: @atomic_umax_zero(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i32 0 monotonic, align 4
+; CHECK-NEXT:    ret i32 [[RES]]
+;
+; X64-LABEL: atomic_umax_zero:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    .p2align 4
+; X64-NEXT:  .LBB16_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    lock cmpxchgl %eax, (%rdi)
+; X64-NEXT:    jne .LBB16_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+;
+; X86-LABEL: atomic_umax_zero:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movl (%ecx), %eax
+; X86-NEXT:    .p2align 4
+; X86-NEXT:  .LBB16_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    lock cmpxchgl %eax, (%ecx)
+; X86-NEXT:    jne .LBB16_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    retl
+  atomicrmw umax ptr %addr, i32 0 seq_cst
+  ret void
+}
+
+define void @atomic_min_smax_char(ptr %addr) {
+; CHECK-LABEL: @atomic_min_smax_char(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+; X64-LABEL: atomic_min_smax_char:
+; X64:       # %bb.0:
+; X64-NEXT:    movzbl (%rdi), %eax
+; X64-NEXT:    .p2align 4
+; X64-NEXT:  .LBB17_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    lock cmpxchgb %al, (%rdi)
+; X64-NEXT:    jne .LBB17_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+;
+; X86-LABEL: atomic_min_smax_char:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl (%ecx), %eax
+; X86-NEXT:    .p2align 4
+; X86-NEXT:  .LBB17_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    lock cmpxchgb %al, (%ecx)
+; X86-NEXT:    jne .LBB17_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    retl
+  atomicrmw min ptr %addr, i8 127 seq_cst
+  ret void
+}
+
+define void @atomic_max_smin_char(ptr %addr) {
+; CHECK-LABEL: @atomic_max_smin_char(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+; X64-LABEL: atomic_max_smin_char:
+; X64:       # %bb.0:
+; X64-NEXT:    movzbl (%rdi), %eax
+; X64-NEXT:    .p2align 4
+; X64-NEXT:  .LBB18_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    lock cmpxchgb %al, (%rdi)
+; X64-NEXT:    jne .LBB18_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+;
+; X86-LABEL: atomic_max_smin_char:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl (%ecx), %eax
+; X86-NEXT:    .p2align 4
+; X86-NEXT:  .LBB18_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    lock cmpxchgb %al, (%ecx)
+; X86-NEXT:    jne .LBB18_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    retl
+  atomicrmw max ptr %addr, i8 -128 seq_cst
+  ret void
+}
+
+define void @atomic_min_umax_char(ptr %addr) {
+; CHECK-LABEL: @atomic_min_umax_char(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+; X64-LABEL: atomic_min_umax_char:
+; X64:       # %bb.0:
+; X64-NEXT:    movzbl (%rdi), %eax
+; X64-NEXT:    .p2align 4
+; X64-NEXT:  .LBB19_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    lock cmpxchgb %al, (%rdi)
+; X64-NEXT:    jne .LBB19_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+;
+; X86-LABEL: atomic_min_umax_char:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl (%ecx), %eax
+; X86-NEXT:    .p2align 4
+; X86-NEXT:  .LBB19_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    lock cmpxchgb %al, (%ecx)
+; X86-NEXT:    jne .LBB19_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    retl
+  atomicrmw umin ptr %addr, i8 255 seq_cst
+  ret void
+}
+
+define void @atomic_max_umin_char(ptr %addr) {
+; CHECK-LABEL: @atomic_max_umin_char(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+; X64-LABEL: atomic_max_umin_char:
+; X64:       # %bb.0:
+; X64-NEXT:    movzbl (%rdi), %eax
+; X64-NEXT:    .p2align 4
+; X64-NEXT:  .LBB20_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    lock cmpxchgb %al, (%rdi)
+; X64-NEXT:    jne .LBB20_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+;
+; X86-LABEL: atomic_max_umin_char:
+; X86:       # %bb.0:
+; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-NEXT:    movzbl (%ecx), %eax
+; X86-NEXT:    .p2align 4
+; X86-NEXT:  .LBB20_1: # %atomicrmw.start
+; X86-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-NEXT:    lock cmpxchgb %al, (%ecx)
+; X86-NEXT:    jne .LBB20_1
+; X86-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-NEXT:    retl
+  atomicrmw umax ptr %addr, i8 0 seq_cst
+  ret void
+}
+
+define void @atomic_fadd_zero(ptr %addr) {
+; CHECK-LABEL: @atomic_fadd_zero(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw fadd ptr [[ADDR:%.*]], float -0.000000e+00 monotonic, align 4
+; CHECK-NEXT:    ret float [[RES]]
+;
+; X64-LABEL: atomic_fadd_zero:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    .p2align 4
+; X64-NEXT:  .LBB21_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    lock cmpxchgl %eax, (%rdi)
+; X64-NEXT:    jne .LBB21_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+;
+; X86-SSE2-LABEL: atomic_fadd_zero:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT:    .p2align 4
+; X86-SSE2-NEXT:  .LBB21_1: # %atomicrmw.start
+; X86-SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-SSE2-NEXT:    movd %xmm0, %eax
+; X86-SSE2-NEXT:    lock cmpxchgl %eax, (%ecx)
+; X86-SSE2-NEXT:    movd %eax, %xmm0
+; X86-SSE2-NEXT:    jne .LBB21_1
+; X86-SSE2-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-SSE2-NEXT:    retl
+;
+; X86-SLM-LABEL: atomic_fadd_zero:
+; X86-SLM:       # %bb.0:
+; X86-SLM-NEXT:    subl $8, %esp
+; X86-SLM-NEXT:    .cfi_def_cfa_offset 12
+; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SLM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SLM-NEXT:    .p2align 4
+; X86-SLM-NEXT:  .LBB21_1: # %atomicrmw.start
+; X86-SLM-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-SLM-NEXT:    movss %xmm0, (%esp)
+; X86-SLM-NEXT:    movl (%esp), %eax
+; X86-SLM-NEXT:    lock cmpxchgl %eax, (%ecx)
+; X86-SLM-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-SLM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SLM-NEXT:    jne .LBB21_1
+; X86-SLM-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-SLM-NEXT:    addl $8, %esp
+; X86-SLM-NEXT:    .cfi_def_cfa_offset 4
+; X86-SLM-NEXT:    retl
+;
+; X86-ATOM-LABEL: atomic_fadd_zero:
+; X86-ATOM:       # %bb.0:
+; X86-ATOM-NEXT:    leal -{{[0-9]+}}(%esp), %esp
+; X86-ATOM-NEXT:    .cfi_def_cfa_offset 12
+; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-ATOM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-ATOM-NEXT:    .p2align 4
+; X86-ATOM-NEXT:  .LBB21_1: # %atomicrmw.start
+; X86-ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-ATOM-NEXT:    movss %xmm0, (%esp)
+; X86-ATOM-NEXT:    movl (%esp), %eax
+; X86-ATOM-NEXT:    lock cmpxchgl %eax, (%ecx)
+; X86-ATOM-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-ATOM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-ATOM-NEXT:    jne .LBB21_1
+; X86-ATOM-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-ATOM-NEXT:    leal {{[0-9]+}}(%esp), %esp
+; X86-ATOM-NEXT:    .cfi_def_cfa_offset 4
+; X86-ATOM-NEXT:    retl
+  atomicrmw fadd ptr %addr, float -0.0 monotonic
+  ret void
+}
+
+define void @atomic_fsub_zero(ptr %addr) {
+; CHECK-LABEL: @atomic_fsub_canon(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw fadd ptr [[ADDR:%.*]], float -0.000000e+00 release, align 4
+; CHECK-NEXT:    ret float [[RES]]
+;
+; X64-LABEL: atomic_fsub_zero:
+; X64:       # %bb.0:
+; X64-NEXT:    movl (%rdi), %eax
+; X64-NEXT:    .p2align 4
+; X64-NEXT:  .LBB22_1: # %atomicrmw.start
+; X64-NEXT:    # =>This Inner Loop Header: Depth=1
+; X64-NEXT:    lock cmpxchgl %eax, (%rdi)
+; X64-NEXT:    jne .LBB22_1
+; X64-NEXT:  # %bb.2: # %atomicrmw.end
+; X64-NEXT:    retq
+;
+; X86-SSE2-LABEL: atomic_fsub_zero:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT:    .p2align 4
+; X86-SSE2-NEXT:  .LBB22_1: # %atomicrmw.start
+; X86-SSE2-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-SSE2-NEXT:    movd %xmm0, %eax
+; X86-SSE2-NEXT:    lock cmpxchgl %eax, (%ecx)
+; X86-SSE2-NEXT:    movd %eax, %xmm0
+; X86-SSE2-NEXT:    jne .LBB22_1
+; X86-SSE2-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-SSE2-NEXT:    retl
+;
+; X86-SLM-LABEL: atomic_fsub_zero:
+; X86-SLM:       # %bb.0:
+; X86-SLM-NEXT:    subl $8, %esp
+; X86-SLM-NEXT:    .cfi_def_cfa_offset 12
+; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-SLM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SLM-NEXT:    .p2align 4
+; X86-SLM-NEXT:  .LBB22_1: # %atomicrmw.start
+; X86-SLM-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-SLM-NEXT:    movss %xmm0, (%esp)
+; X86-SLM-NEXT:    movl (%esp), %eax
+; X86-SLM-NEXT:    lock cmpxchgl %eax, (%ecx)
+; X86-SLM-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-SLM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SLM-NEXT:    jne .LBB22_1
+; X86-SLM-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-SLM-NEXT:    addl $8, %esp
+; X86-SLM-NEXT:    .cfi_def_cfa_offset 4
+; X86-SLM-NEXT:    retl
+;
+; X86-ATOM-LABEL: atomic_fsub_zero:
+; X86-ATOM:       # %bb.0:
+; X86-ATOM-NEXT:    leal -{{[0-9]+}}(%esp), %esp
+; X86-ATOM-NEXT:    .cfi_def_cfa_offset 12
+; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
+; X86-ATOM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-ATOM-NEXT:    .p2align 4
+; X86-ATOM-NEXT:  .LBB22_1: # %atomicrmw.start
+; X86-ATOM-NEXT:    # =>This Inner Loop Header: Depth=1
+; X86-ATOM-NEXT:    movss %xmm0, (%esp)
+; X86-ATOM-NEXT:    movl (%esp), %eax
+; X86-ATOM-NEXT:    lock cmpxchgl %eax, (%ecx)
+; X86-ATOM-NEXT:    movl %eax, {{[0-9]+}}(%esp)
+; X86-ATOM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-ATOM-NEXT:    jne .LBB22_1
+; X86-ATOM-NEXT:  # %bb.2: # %atomicrmw.end
+; X86-ATOM-NEXT:    leal {{[0-9]+}}(%esp), %esp
+; X86-ATOM-NEXT:    .cfi_def_cfa_offset 4
+; X86-ATOM-NEXT:    retl
+  atomicrmw fsub ptr %addr, float 0.0 release
+  ret void
+}
+
 attributes #0 = { nounwind }
diff --git a/llvm/test/Transforms/InstCombine/atomicrmw.ll b/llvm/test/Transforms/InstCombine/atomicrmw.ll
index ca5ffd110ad61..b6c0e1e810f96 100644
--- a/llvm/test/Transforms/InstCombine/atomicrmw.ll
+++ b/llvm/test/Transforms/InstCombine/atomicrmw.ll
@@ -85,6 +85,26 @@ define i8 @atomic_max_smin_char(ptr %addr) {
   ret i8 %res
 }
 
+; Idempotent atomicrmw are still canonicalized.
+define i8 @atomic_min_umax_char(ptr %addr) {
+; CHECK-LABEL: @atomic_min_umax_char(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+  %res = atomicrmw umin ptr %addr, i8 255 monotonic
+  ret i8 %res
+}
+
+; Idempotent atomicrmw are still canonicalized.
+define i8 @atomic_max_umin_char(ptr %addr) {
+; CHECK-LABEL: @atomic_max_umin_char(
+; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
+; CHECK-NEXT:    ret i8 [[RES]]
+;
+  %res = atomicrmw umax ptr %addr, i8 0 monotonic
+  ret i8 %res
+}
+
 ; Idempotent atomicrmw are still canonicalized.
 define float @atomic_fsub_zero(ptr %addr) {
 ; CHECK-LABEL: @atomic_fsub_zero(

>From e3248f96760ab1623ca34a5e8ada6a7387dd8d43 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Sat, 31 May 2025 10:31:30 -0400
Subject: [PATCH 2/4] [AtomicExpandPass] Match isIdempotentRMW with
 InstCombineRMW

Add umin, smin, umax, smax to isIdempotentRMW
---
 llvm/lib/CodeGen/AtomicExpandPass.cpp      |  13 +-
 llvm/test/CodeGen/X86/atomic-idempotent.ll | 241 ++++++++++++---------
 2 files changed, 149 insertions(+), 105 deletions(-)

diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index c376de877ac7d..044f0732779f3 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -1570,12 +1570,12 @@ bool AtomicExpandImpl::expandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
 }
 
 bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
+  // TODO: Add floating point support.
   auto C = dyn_cast<ConstantInt>(RMWI->getValOperand());
   if (!C)
     return false;
 
-  AtomicRMWInst::BinOp Op = RMWI->getOperation();
-  switch (Op) {
+  switch (RMWI->getOperation()) {
   case AtomicRMWInst::Add:
   case AtomicRMWInst::Sub:
   case AtomicRMWInst::Or:
@@ -1583,7 +1583,14 @@ bool AtomicExpandImpl::isIdempotentRMW(AtomicRMWInst *RMWI) {
     return C->isZero();
   case AtomicRMWInst::And:
     return C->isMinusOne();
-  // FIXME: we could also treat Min/Max/UMin/UMax by the INT_MIN/INT_MAX/...
+  case AtomicRMWInst::Min:
+    return C->isMaxValue(true);
+  case AtomicRMWInst::Max:
+    return C->isMinValue(true);
+  case AtomicRMWInst::UMin:
+    return C->isMaxValue(false);
+  case AtomicRMWInst::UMax:
+    return C->isMinValue(false);
   default:
     return false;
   }
diff --git a/llvm/test/CodeGen/X86/atomic-idempotent.ll b/llvm/test/CodeGen/X86/atomic-idempotent.ll
index 1a5dd86ddaedb..f3b902f45196d 100644
--- a/llvm/test/CodeGen/X86/atomic-idempotent.ll
+++ b/llvm/test/CodeGen/X86/atomic-idempotent.ll
@@ -629,26 +629,32 @@ define void @atomic_umin_uint_max(ptr %addr) {
 ;
 ; X64-LABEL: atomic_umin_uint_max:
 ; X64:       # %bb.0:
+; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4
-; X64-NEXT:  .LBB15_1: # %atomicrmw.start
-; X64-NEXT:    # =>This Inner Loop Header: Depth=1
-; X64-NEXT:    lock cmpxchgl %eax, (%rdi)
-; X64-NEXT:    jne .LBB15_1
-; X64-NEXT:  # %bb.2: # %atomicrmw.end
 ; X64-NEXT:    retq
 ;
-; X86-LABEL: atomic_umin_uint_max:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4
-; X86-NEXT:  .LBB15_1: # %atomicrmw.start
-; X86-NEXT:    # =>This Inner Loop Header: Depth=1
-; X86-NEXT:    lock cmpxchgl %eax, (%ecx)
-; X86-NEXT:    jne .LBB15_1
-; X86-NEXT:  # %bb.2: # %atomicrmw.end
-; X86-NEXT:    retl
+; X86-SSE2-LABEL: atomic_umin_uint_max:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    mfence
+; X86-SSE2-NEXT:    movl (%eax), %eax
+; X86-SSE2-NEXT:    retl
+;
+; X86-SLM-LABEL: atomic_umin_uint_max:
+; X86-SLM:       # %bb.0:
+; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SLM-NEXT:    lock orl $0, (%esp)
+; X86-SLM-NEXT:    movl (%eax), %eax
+; X86-SLM-NEXT:    retl
+;
+; X86-ATOM-LABEL: atomic_umin_uint_max:
+; X86-ATOM:       # %bb.0:
+; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-ATOM-NEXT:    lock orl $0, (%esp)
+; X86-ATOM-NEXT:    movl (%eax), %eax
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    retl
   atomicrmw umin ptr %addr, i32 -1 seq_cst
   ret void
 }
@@ -660,26 +666,32 @@ define void @atomic_umax_zero(ptr %addr) {
 ;
 ; X64-LABEL: atomic_umax_zero:
 ; X64:       # %bb.0:
+; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movl (%rdi), %eax
-; X64-NEXT:    .p2align 4
-; X64-NEXT:  .LBB16_1: # %atomicrmw.start
-; X64-NEXT:    # =>This Inner Loop Header: Depth=1
-; X64-NEXT:    lock cmpxchgl %eax, (%rdi)
-; X64-NEXT:    jne .LBB16_1
-; X64-NEXT:  # %bb.2: # %atomicrmw.end
 ; X64-NEXT:    retq
 ;
-; X86-LABEL: atomic_umax_zero:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movl (%ecx), %eax
-; X86-NEXT:    .p2align 4
-; X86-NEXT:  .LBB16_1: # %atomicrmw.start
-; X86-NEXT:    # =>This Inner Loop Header: Depth=1
-; X86-NEXT:    lock cmpxchgl %eax, (%ecx)
-; X86-NEXT:    jne .LBB16_1
-; X86-NEXT:  # %bb.2: # %atomicrmw.end
-; X86-NEXT:    retl
+; X86-SSE2-LABEL: atomic_umax_zero:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    mfence
+; X86-SSE2-NEXT:    movl (%eax), %eax
+; X86-SSE2-NEXT:    retl
+;
+; X86-SLM-LABEL: atomic_umax_zero:
+; X86-SLM:       # %bb.0:
+; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SLM-NEXT:    lock orl $0, (%esp)
+; X86-SLM-NEXT:    movl (%eax), %eax
+; X86-SLM-NEXT:    retl
+;
+; X86-ATOM-LABEL: atomic_umax_zero:
+; X86-ATOM:       # %bb.0:
+; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-ATOM-NEXT:    lock orl $0, (%esp)
+; X86-ATOM-NEXT:    movl (%eax), %eax
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    retl
   atomicrmw umax ptr %addr, i32 0 seq_cst
   ret void
 }
@@ -691,26 +703,32 @@ define void @atomic_min_smax_char(ptr %addr) {
 ;
 ; X64-LABEL: atomic_min_smax_char:
 ; X64:       # %bb.0:
+; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4
-; X64-NEXT:  .LBB17_1: # %atomicrmw.start
-; X64-NEXT:    # =>This Inner Loop Header: Depth=1
-; X64-NEXT:    lock cmpxchgb %al, (%rdi)
-; X64-NEXT:    jne .LBB17_1
-; X64-NEXT:  # %bb.2: # %atomicrmw.end
 ; X64-NEXT:    retq
 ;
-; X86-LABEL: atomic_min_smax_char:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzbl (%ecx), %eax
-; X86-NEXT:    .p2align 4
-; X86-NEXT:  .LBB17_1: # %atomicrmw.start
-; X86-NEXT:    # =>This Inner Loop Header: Depth=1
-; X86-NEXT:    lock cmpxchgb %al, (%ecx)
-; X86-NEXT:    jne .LBB17_1
-; X86-NEXT:  # %bb.2: # %atomicrmw.end
-; X86-NEXT:    retl
+; X86-SSE2-LABEL: atomic_min_smax_char:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    mfence
+; X86-SSE2-NEXT:    movzbl (%eax), %eax
+; X86-SSE2-NEXT:    retl
+;
+; X86-SLM-LABEL: atomic_min_smax_char:
+; X86-SLM:       # %bb.0:
+; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SLM-NEXT:    lock orl $0, (%esp)
+; X86-SLM-NEXT:    movzbl (%eax), %eax
+; X86-SLM-NEXT:    retl
+;
+; X86-ATOM-LABEL: atomic_min_smax_char:
+; X86-ATOM:       # %bb.0:
+; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-ATOM-NEXT:    lock orl $0, (%esp)
+; X86-ATOM-NEXT:    movzbl (%eax), %eax
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    retl
   atomicrmw min ptr %addr, i8 127 seq_cst
   ret void
 }
@@ -722,26 +740,32 @@ define void @atomic_max_smin_char(ptr %addr) {
 ;
 ; X64-LABEL: atomic_max_smin_char:
 ; X64:       # %bb.0:
+; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4
-; X64-NEXT:  .LBB18_1: # %atomicrmw.start
-; X64-NEXT:    # =>This Inner Loop Header: Depth=1
-; X64-NEXT:    lock cmpxchgb %al, (%rdi)
-; X64-NEXT:    jne .LBB18_1
-; X64-NEXT:  # %bb.2: # %atomicrmw.end
 ; X64-NEXT:    retq
 ;
-; X86-LABEL: atomic_max_smin_char:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzbl (%ecx), %eax
-; X86-NEXT:    .p2align 4
-; X86-NEXT:  .LBB18_1: # %atomicrmw.start
-; X86-NEXT:    # =>This Inner Loop Header: Depth=1
-; X86-NEXT:    lock cmpxchgb %al, (%ecx)
-; X86-NEXT:    jne .LBB18_1
-; X86-NEXT:  # %bb.2: # %atomicrmw.end
-; X86-NEXT:    retl
+; X86-SSE2-LABEL: atomic_max_smin_char:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    mfence
+; X86-SSE2-NEXT:    movzbl (%eax), %eax
+; X86-SSE2-NEXT:    retl
+;
+; X86-SLM-LABEL: atomic_max_smin_char:
+; X86-SLM:       # %bb.0:
+; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SLM-NEXT:    lock orl $0, (%esp)
+; X86-SLM-NEXT:    movzbl (%eax), %eax
+; X86-SLM-NEXT:    retl
+;
+; X86-ATOM-LABEL: atomic_max_smin_char:
+; X86-ATOM:       # %bb.0:
+; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-ATOM-NEXT:    lock orl $0, (%esp)
+; X86-ATOM-NEXT:    movzbl (%eax), %eax
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    retl
   atomicrmw max ptr %addr, i8 -128 seq_cst
   ret void
 }
@@ -753,26 +777,32 @@ define void @atomic_min_umax_char(ptr %addr) {
 ;
 ; X64-LABEL: atomic_min_umax_char:
 ; X64:       # %bb.0:
+; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4
-; X64-NEXT:  .LBB19_1: # %atomicrmw.start
-; X64-NEXT:    # =>This Inner Loop Header: Depth=1
-; X64-NEXT:    lock cmpxchgb %al, (%rdi)
-; X64-NEXT:    jne .LBB19_1
-; X64-NEXT:  # %bb.2: # %atomicrmw.end
 ; X64-NEXT:    retq
 ;
-; X86-LABEL: atomic_min_umax_char:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzbl (%ecx), %eax
-; X86-NEXT:    .p2align 4
-; X86-NEXT:  .LBB19_1: # %atomicrmw.start
-; X86-NEXT:    # =>This Inner Loop Header: Depth=1
-; X86-NEXT:    lock cmpxchgb %al, (%ecx)
-; X86-NEXT:    jne .LBB19_1
-; X86-NEXT:  # %bb.2: # %atomicrmw.end
-; X86-NEXT:    retl
+; X86-SSE2-LABEL: atomic_min_umax_char:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    mfence
+; X86-SSE2-NEXT:    movzbl (%eax), %eax
+; X86-SSE2-NEXT:    retl
+;
+; X86-SLM-LABEL: atomic_min_umax_char:
+; X86-SLM:       # %bb.0:
+; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SLM-NEXT:    lock orl $0, (%esp)
+; X86-SLM-NEXT:    movzbl (%eax), %eax
+; X86-SLM-NEXT:    retl
+;
+; X86-ATOM-LABEL: atomic_min_umax_char:
+; X86-ATOM:       # %bb.0:
+; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-ATOM-NEXT:    lock orl $0, (%esp)
+; X86-ATOM-NEXT:    movzbl (%eax), %eax
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    retl
   atomicrmw umin ptr %addr, i8 255 seq_cst
   ret void
 }
@@ -784,30 +814,37 @@ define void @atomic_max_umin_char(ptr %addr) {
 ;
 ; X64-LABEL: atomic_max_umin_char:
 ; X64:       # %bb.0:
+; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
 ; X64-NEXT:    movzbl (%rdi), %eax
-; X64-NEXT:    .p2align 4
-; X64-NEXT:  .LBB20_1: # %atomicrmw.start
-; X64-NEXT:    # =>This Inner Loop Header: Depth=1
-; X64-NEXT:    lock cmpxchgb %al, (%rdi)
-; X64-NEXT:    jne .LBB20_1
-; X64-NEXT:  # %bb.2: # %atomicrmw.end
 ; X64-NEXT:    retq
 ;
-; X86-LABEL: atomic_max_umin_char:
-; X86:       # %bb.0:
-; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
-; X86-NEXT:    movzbl (%ecx), %eax
-; X86-NEXT:    .p2align 4
-; X86-NEXT:  .LBB20_1: # %atomicrmw.start
-; X86-NEXT:    # =>This Inner Loop Header: Depth=1
-; X86-NEXT:    lock cmpxchgb %al, (%ecx)
-; X86-NEXT:    jne .LBB20_1
-; X86-NEXT:  # %bb.2: # %atomicrmw.end
-; X86-NEXT:    retl
+; X86-SSE2-LABEL: atomic_max_umin_char:
+; X86-SSE2:       # %bb.0:
+; X86-SSE2-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SSE2-NEXT:    mfence
+; X86-SSE2-NEXT:    movzbl (%eax), %eax
+; X86-SSE2-NEXT:    retl
+;
+; X86-SLM-LABEL: atomic_max_umin_char:
+; X86-SLM:       # %bb.0:
+; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-SLM-NEXT:    lock orl $0, (%esp)
+; X86-SLM-NEXT:    movzbl (%eax), %eax
+; X86-SLM-NEXT:    retl
+;
+; X86-ATOM-LABEL: atomic_max_umin_char:
+; X86-ATOM:       # %bb.0:
+; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %eax
+; X86-ATOM-NEXT:    lock orl $0, (%esp)
+; X86-ATOM-NEXT:    movzbl (%eax), %eax
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    nop
+; X86-ATOM-NEXT:    retl
   atomicrmw umax ptr %addr, i8 0 seq_cst
   ret void
 }
 
+; TODO: Add floating point support.
 define void @atomic_fadd_zero(ptr %addr) {
 ; CHECK-LABEL: @atomic_fadd_zero(
 ; CHECK-NEXT:    [[RES:%.*]] = atomicrmw fadd ptr [[ADDR:%.*]], float -0.000000e+00 monotonic, align 4

>From c1e73c7f2185bece01b842a1dfb4f09dbbb711c4 Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Tue, 3 Jun 2025 19:19:51 -0400
Subject: [PATCH 3/4] Address comments

---
 llvm/test/CodeGen/X86/atomic-idempotent.ll    | 24 +++++++------------
 llvm/test/Transforms/InstCombine/atomicrmw.ll | 20 ----------------
 2 files changed, 8 insertions(+), 36 deletions(-)

diff --git a/llvm/test/CodeGen/X86/atomic-idempotent.ll b/llvm/test/CodeGen/X86/atomic-idempotent.ll
index f3b902f45196d..b42554094930e 100644
--- a/llvm/test/CodeGen/X86/atomic-idempotent.ll
+++ b/llvm/test/CodeGen/X86/atomic-idempotent.ll
@@ -622,7 +622,7 @@ define void @or8_nouse_seq_cst(ptr %p) #0 {
   ret void
 }
 
-define void @atomic_umin_uint_max(ptr %addr) {
+define void @atomic_umin_uint_max(ptr %addr) #0 {
 ; CHECK-LABEL: @atomic_umin_uint_max(
 ; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i32 0 monotonic, align 4
 ; CHECK-NEXT:    ret i32 [[RES]]
@@ -659,7 +659,7 @@ define void @atomic_umin_uint_max(ptr %addr) {
   ret void
 }
 
-define void @atomic_umax_zero(ptr %addr) {
+define void @atomic_umax_zero(ptr %addr) #0 {
 ; CHECK-LABEL: @atomic_umax_zero(
 ; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i32 0 monotonic, align 4
 ; CHECK-NEXT:    ret i32 [[RES]]
@@ -696,7 +696,7 @@ define void @atomic_umax_zero(ptr %addr) {
   ret void
 }
 
-define void @atomic_min_smax_char(ptr %addr) {
+define void @atomic_min_smax_char(ptr %addr) #0 {
 ; CHECK-LABEL: @atomic_min_smax_char(
 ; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
 ; CHECK-NEXT:    ret i8 [[RES]]
@@ -733,7 +733,7 @@ define void @atomic_min_smax_char(ptr %addr) {
   ret void
 }
 
-define void @atomic_max_smin_char(ptr %addr) {
+define void @atomic_max_smin_char(ptr %addr) #0 {
 ; CHECK-LABEL: @atomic_max_smin_char(
 ; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
 ; CHECK-NEXT:    ret i8 [[RES]]
@@ -770,7 +770,7 @@ define void @atomic_max_smin_char(ptr %addr) {
   ret void
 }
 
-define void @atomic_min_umax_char(ptr %addr) {
+define void @atomic_min_umax_char(ptr %addr) #0 {
 ; CHECK-LABEL: @atomic_min_umax_char(
 ; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
 ; CHECK-NEXT:    ret i8 [[RES]]
@@ -807,7 +807,7 @@ define void @atomic_min_umax_char(ptr %addr) {
   ret void
 }
 
-define void @atomic_max_umin_char(ptr %addr) {
+define void @atomic_max_umin_char(ptr %addr) #0 {
 ; CHECK-LABEL: @atomic_max_umin_char(
 ; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
 ; CHECK-NEXT:    ret i8 [[RES]]
@@ -845,7 +845,7 @@ define void @atomic_max_umin_char(ptr %addr) {
 }
 
 ; TODO: Add floating point support.
-define void @atomic_fadd_zero(ptr %addr) {
+define void @atomic_fadd_zero(ptr %addr) #0 {
 ; CHECK-LABEL: @atomic_fadd_zero(
 ; CHECK-NEXT:    [[RES:%.*]] = atomicrmw fadd ptr [[ADDR:%.*]], float -0.000000e+00 monotonic, align 4
 ; CHECK-NEXT:    ret float [[RES]]
@@ -878,7 +878,6 @@ define void @atomic_fadd_zero(ptr %addr) {
 ; X86-SLM-LABEL: atomic_fadd_zero:
 ; X86-SLM:       # %bb.0:
 ; X86-SLM-NEXT:    subl $8, %esp
-; X86-SLM-NEXT:    .cfi_def_cfa_offset 12
 ; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SLM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-SLM-NEXT:    .p2align 4
@@ -892,13 +891,11 @@ define void @atomic_fadd_zero(ptr %addr) {
 ; X86-SLM-NEXT:    jne .LBB21_1
 ; X86-SLM-NEXT:  # %bb.2: # %atomicrmw.end
 ; X86-SLM-NEXT:    addl $8, %esp
-; X86-SLM-NEXT:    .cfi_def_cfa_offset 4
 ; X86-SLM-NEXT:    retl
 ;
 ; X86-ATOM-LABEL: atomic_fadd_zero:
 ; X86-ATOM:       # %bb.0:
 ; X86-ATOM-NEXT:    leal -{{[0-9]+}}(%esp), %esp
-; X86-ATOM-NEXT:    .cfi_def_cfa_offset 12
 ; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-ATOM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-ATOM-NEXT:    .p2align 4
@@ -912,13 +909,12 @@ define void @atomic_fadd_zero(ptr %addr) {
 ; X86-ATOM-NEXT:    jne .LBB21_1
 ; X86-ATOM-NEXT:  # %bb.2: # %atomicrmw.end
 ; X86-ATOM-NEXT:    leal {{[0-9]+}}(%esp), %esp
-; X86-ATOM-NEXT:    .cfi_def_cfa_offset 4
 ; X86-ATOM-NEXT:    retl
   atomicrmw fadd ptr %addr, float -0.0 monotonic
   ret void
 }
 
-define void @atomic_fsub_zero(ptr %addr) {
+define void @atomic_fsub_zero(ptr %addr) #0 {
 ; CHECK-LABEL: @atomic_fsub_canon(
 ; CHECK-NEXT:    [[RES:%.*]] = atomicrmw fadd ptr [[ADDR:%.*]], float -0.000000e+00 release, align 4
 ; CHECK-NEXT:    ret float [[RES]]
@@ -951,7 +947,6 @@ define void @atomic_fsub_zero(ptr %addr) {
 ; X86-SLM-LABEL: atomic_fsub_zero:
 ; X86-SLM:       # %bb.0:
 ; X86-SLM-NEXT:    subl $8, %esp
-; X86-SLM-NEXT:    .cfi_def_cfa_offset 12
 ; X86-SLM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-SLM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-SLM-NEXT:    .p2align 4
@@ -965,13 +960,11 @@ define void @atomic_fsub_zero(ptr %addr) {
 ; X86-SLM-NEXT:    jne .LBB22_1
 ; X86-SLM-NEXT:  # %bb.2: # %atomicrmw.end
 ; X86-SLM-NEXT:    addl $8, %esp
-; X86-SLM-NEXT:    .cfi_def_cfa_offset 4
 ; X86-SLM-NEXT:    retl
 ;
 ; X86-ATOM-LABEL: atomic_fsub_zero:
 ; X86-ATOM:       # %bb.0:
 ; X86-ATOM-NEXT:    leal -{{[0-9]+}}(%esp), %esp
-; X86-ATOM-NEXT:    .cfi_def_cfa_offset 12
 ; X86-ATOM-NEXT:    movl {{[0-9]+}}(%esp), %ecx
 ; X86-ATOM-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; X86-ATOM-NEXT:    .p2align 4
@@ -985,7 +978,6 @@ define void @atomic_fsub_zero(ptr %addr) {
 ; X86-ATOM-NEXT:    jne .LBB22_1
 ; X86-ATOM-NEXT:  # %bb.2: # %atomicrmw.end
 ; X86-ATOM-NEXT:    leal {{[0-9]+}}(%esp), %esp
-; X86-ATOM-NEXT:    .cfi_def_cfa_offset 4
 ; X86-ATOM-NEXT:    retl
   atomicrmw fsub ptr %addr, float 0.0 release
   ret void
diff --git a/llvm/test/Transforms/InstCombine/atomicrmw.ll b/llvm/test/Transforms/InstCombine/atomicrmw.ll
index b6c0e1e810f96..ca5ffd110ad61 100644
--- a/llvm/test/Transforms/InstCombine/atomicrmw.ll
+++ b/llvm/test/Transforms/InstCombine/atomicrmw.ll
@@ -85,26 +85,6 @@ define i8 @atomic_max_smin_char(ptr %addr) {
   ret i8 %res
 }
 
-; Idempotent atomicrmw are still canonicalized.
-define i8 @atomic_min_umax_char(ptr %addr) {
-; CHECK-LABEL: @atomic_min_umax_char(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
-; CHECK-NEXT:    ret i8 [[RES]]
-;
-  %res = atomicrmw umin ptr %addr, i8 255 monotonic
-  ret i8 %res
-}
-
-; Idempotent atomicrmw are still canonicalized.
-define i8 @atomic_max_umin_char(ptr %addr) {
-; CHECK-LABEL: @atomic_max_umin_char(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
-; CHECK-NEXT:    ret i8 [[RES]]
-;
-  %res = atomicrmw umax ptr %addr, i8 0 monotonic
-  ret i8 %res
-}
-
 ; Idempotent atomicrmw are still canonicalized.
 define float @atomic_fsub_zero(ptr %addr) {
 ; CHECK-LABEL: @atomic_fsub_zero(

>From 1573eb2dc2226d93f2452a81c954129d16dbdf6a Mon Sep 17 00:00:00 2001
From: Rose <gfunni234 at gmail.com>
Date: Thu, 5 Jun 2025 16:46:37 -0400
Subject: [PATCH 4/4] Remove copy-paste error

---
 llvm/test/CodeGen/X86/atomic-idempotent.ll | 32 ----------------------
 1 file changed, 32 deletions(-)

diff --git a/llvm/test/CodeGen/X86/atomic-idempotent.ll b/llvm/test/CodeGen/X86/atomic-idempotent.ll
index b42554094930e..01c3e7999a92c 100644
--- a/llvm/test/CodeGen/X86/atomic-idempotent.ll
+++ b/llvm/test/CodeGen/X86/atomic-idempotent.ll
@@ -623,10 +623,6 @@ define void @or8_nouse_seq_cst(ptr %p) #0 {
 }
 
 define void @atomic_umin_uint_max(ptr %addr) #0 {
-; CHECK-LABEL: @atomic_umin_uint_max(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i32 0 monotonic, align 4
-; CHECK-NEXT:    ret i32 [[RES]]
-;
 ; X64-LABEL: atomic_umin_uint_max:
 ; X64:       # %bb.0:
 ; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
@@ -660,10 +656,6 @@ define void @atomic_umin_uint_max(ptr %addr) #0 {
 }
 
 define void @atomic_umax_zero(ptr %addr) #0 {
-; CHECK-LABEL: @atomic_umax_zero(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i32 0 monotonic, align 4
-; CHECK-NEXT:    ret i32 [[RES]]
-;
 ; X64-LABEL: atomic_umax_zero:
 ; X64:       # %bb.0:
 ; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
@@ -697,10 +689,6 @@ define void @atomic_umax_zero(ptr %addr) #0 {
 }
 
 define void @atomic_min_smax_char(ptr %addr) #0 {
-; CHECK-LABEL: @atomic_min_smax_char(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
-; CHECK-NEXT:    ret i8 [[RES]]
-;
 ; X64-LABEL: atomic_min_smax_char:
 ; X64:       # %bb.0:
 ; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
@@ -734,10 +722,6 @@ define void @atomic_min_smax_char(ptr %addr) #0 {
 }
 
 define void @atomic_max_smin_char(ptr %addr) #0 {
-; CHECK-LABEL: @atomic_max_smin_char(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
-; CHECK-NEXT:    ret i8 [[RES]]
-;
 ; X64-LABEL: atomic_max_smin_char:
 ; X64:       # %bb.0:
 ; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
@@ -771,10 +755,6 @@ define void @atomic_max_smin_char(ptr %addr) #0 {
 }
 
 define void @atomic_min_umax_char(ptr %addr) #0 {
-; CHECK-LABEL: @atomic_min_umax_char(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
-; CHECK-NEXT:    ret i8 [[RES]]
-;
 ; X64-LABEL: atomic_min_umax_char:
 ; X64:       # %bb.0:
 ; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
@@ -808,10 +788,6 @@ define void @atomic_min_umax_char(ptr %addr) #0 {
 }
 
 define void @atomic_max_umin_char(ptr %addr) #0 {
-; CHECK-LABEL: @atomic_max_umin_char(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i8 0 monotonic, align 1
-; CHECK-NEXT:    ret i8 [[RES]]
-;
 ; X64-LABEL: atomic_max_umin_char:
 ; X64:       # %bb.0:
 ; X64-NEXT:    lock orl $0, -{{[0-9]+}}(%rsp)
@@ -846,10 +822,6 @@ define void @atomic_max_umin_char(ptr %addr) #0 {
 
 ; TODO: Add floating point support.
 define void @atomic_fadd_zero(ptr %addr) #0 {
-; CHECK-LABEL: @atomic_fadd_zero(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw fadd ptr [[ADDR:%.*]], float -0.000000e+00 monotonic, align 4
-; CHECK-NEXT:    ret float [[RES]]
-;
 ; X64-LABEL: atomic_fadd_zero:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax
@@ -915,10 +887,6 @@ define void @atomic_fadd_zero(ptr %addr) #0 {
 }
 
 define void @atomic_fsub_zero(ptr %addr) #0 {
-; CHECK-LABEL: @atomic_fsub_canon(
-; CHECK-NEXT:    [[RES:%.*]] = atomicrmw fadd ptr [[ADDR:%.*]], float -0.000000e+00 release, align 4
-; CHECK-NEXT:    ret float [[RES]]
-;
 ; X64-LABEL: atomic_fsub_zero:
 ; X64:       # %bb.0:
 ; X64-NEXT:    movl (%rdi), %eax



More information about the llvm-commits mailing list