[llvm] AMDGPU: Don't bitcast float typed atomic store in IR (PR #90116)

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Wed May 1 12:38:06 PDT 2024


https://github.com/arsenm updated https://github.com/llvm/llvm-project/pull/90116

From b82cd2b5f4d1bc4ff9df4598d8dc6ac8240a9cb1 Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault at amd.com>
Date: Thu, 25 Apr 2024 16:19:22 +0200
Subject: [PATCH] AMDGPU: Don't bitcast float typed atomic store in IR

Implement the promotion in the DAG instead: f16, bf16, f32 and f64
ATOMIC_STORE nodes are promoted to the equivalent-width integer type
during SelectionDAG legalization, so AtomicExpand no longer needs to
bitcast float-typed atomic stores in IR.
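
For reference, the target-side wiring boils down to two pieces; this is a
minimal sketch in the spirit of the AMDGPU hunks below, shown for f32 only:

  // Keep float-typed atomic stores in IR: AtomicExpand will no longer
  // rewrite them through a bitcast to the integer type.
  AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override {
    return AtomicExpansionKind::None;
  }

  // ...and in the TargetLowering constructor, ask DAG legalization to
  // promote the float-typed ATOMIC_STORE to the same-width integer type.
  setOperationAction(ISD::ATOMIC_STORE, MVT::f32, Promote);
  AddPromotedToType(ISD::ATOMIC_STORE, MVT::f32, MVT::i32);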
---
 llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp | 29 +++++++----
 .../SelectionDAG/LegalizeFloatTypes.cpp       | 34 +++++++++++++
 llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h |  2 +
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 12 +++++
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h   |  4 ++
 .../AMDGPU/no-expand-atomic-store.ll          | 51 +++++++------------
 6 files changed, 91 insertions(+), 41 deletions(-)

diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index bfc3e08c1632de..b3ae419b20fec2 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -5006,7 +5006,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
       Node->getOpcode() == ISD::INSERT_VECTOR_ELT) {
     OVT = Node->getOperand(0).getSimpleValueType();
   }
-  if (Node->getOpcode() == ISD::STRICT_UINT_TO_FP ||
+  if (Node->getOpcode() == ISD::ATOMIC_STORE ||
+      Node->getOpcode() == ISD::STRICT_UINT_TO_FP ||
       Node->getOpcode() == ISD::STRICT_SINT_TO_FP ||
       Node->getOpcode() == ISD::STRICT_FSETCC ||
       Node->getOpcode() == ISD::STRICT_FSETCCS ||
@@ -5622,7 +5623,8 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
     Results.push_back(CvtVec);
     break;
   }
-  case ISD::ATOMIC_SWAP: {
+  case ISD::ATOMIC_SWAP:
+  case ISD::ATOMIC_STORE: {
     AtomicSDNode *AM = cast<AtomicSDNode>(Node);
     SDLoc SL(Node);
     SDValue CastVal = DAG.getNode(ISD::BITCAST, SL, NVT, AM->getVal());
@@ -5631,13 +5633,22 @@ void SelectionDAGLegalize::PromoteNode(SDNode *Node) {
     assert(AM->getMemoryVT().getSizeInBits() == NVT.getSizeInBits() &&
            "unexpected atomic_swap with illegal type");
 
-    SDValue NewAtomic
-      = DAG.getAtomic(ISD::ATOMIC_SWAP, SL, NVT,
-                      DAG.getVTList(NVT, MVT::Other),
-                      { AM->getChain(), AM->getBasePtr(), CastVal },
-                      AM->getMemOperand());
-    Results.push_back(DAG.getNode(ISD::BITCAST, SL, OVT, NewAtomic));
-    Results.push_back(NewAtomic.getValue(1));
+    SDValue Op0 = AM->getBasePtr();
+    SDValue Op1 = CastVal;
+
+    // ATOMIC_STORE uses a swapped operand order from every other AtomicSDNode,
+    // but really it should merge with ISD::STORE.
+    if (AM->getOpcode() == ISD::ATOMIC_STORE)
+      std::swap(Op0, Op1);
+
+    SDValue NewAtomic = DAG.getAtomic(AM->getOpcode(), SL, NVT, AM->getChain(),
+                                      Op0, Op1, AM->getMemOperand());
+
+    if (AM->getOpcode() != ISD::ATOMIC_STORE) {
+      Results.push_back(DAG.getNode(ISD::BITCAST, SL, OVT, NewAtomic));
+      Results.push_back(NewAtomic.getValue(1));
+    } else
+      Results.push_back(NewAtomic);
     break;
   }
   case ISD::ATOMIC_LOAD: {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
index abe5be76382556..cec9b4b4eaa34d 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeFloatTypes.cpp
@@ -2249,6 +2249,7 @@ bool DAGTypeLegalizer::PromoteFloatOperand(SDNode *N, unsigned OpNo) {
     case ISD::SELECT_CC:  R = PromoteFloatOp_SELECT_CC(N, OpNo); break;
     case ISD::SETCC:      R = PromoteFloatOp_SETCC(N, OpNo); break;
     case ISD::STORE:      R = PromoteFloatOp_STORE(N, OpNo); break;
+    case ISD::ATOMIC_STORE: R = PromoteFloatOp_ATOMIC_STORE(N, OpNo); break;
   }
   // clang-format on
 
@@ -2371,6 +2372,23 @@ SDValue DAGTypeLegalizer::PromoteFloatOp_STORE(SDNode *N, unsigned OpNo) {
                       ST->getMemOperand());
 }
 
+SDValue DAGTypeLegalizer::PromoteFloatOp_ATOMIC_STORE(SDNode *N,
+                                                      unsigned OpNo) {
+  AtomicSDNode *ST = cast<AtomicSDNode>(N);
+  SDValue Val = ST->getVal();
+  SDLoc DL(N);
+
+  SDValue Promoted = GetPromotedFloat(Val);
+  EVT VT = ST->getOperand(1).getValueType();
+  EVT IVT = EVT::getIntegerVT(*DAG.getContext(), VT.getSizeInBits());
+
+  SDValue NewVal = DAG.getNode(GetPromotionOpcode(Promoted.getValueType(), VT),
+                               DL, IVT, Promoted);
+
+  return DAG.getAtomic(ISD::ATOMIC_STORE, DL, IVT, ST->getChain(), NewVal,
+                       ST->getBasePtr(), ST->getMemOperand());
+}
+
 //===----------------------------------------------------------------------===//
 //  Float Result Promotion
 //===----------------------------------------------------------------------===//
@@ -3193,6 +3211,9 @@ bool DAGTypeLegalizer::SoftPromoteHalfOperand(SDNode *N, unsigned OpNo) {
   case ISD::SELECT_CC:  Res = SoftPromoteHalfOp_SELECT_CC(N, OpNo); break;
   case ISD::SETCC:      Res = SoftPromoteHalfOp_SETCC(N); break;
   case ISD::STORE:      Res = SoftPromoteHalfOp_STORE(N, OpNo); break;
+  case ISD::ATOMIC_STORE:
+    Res = SoftPromoteHalfOp_ATOMIC_STORE(N, OpNo);
+    break;
   case ISD::STACKMAP:
     Res = SoftPromoteHalfOp_STACKMAP(N, OpNo);
     break;
@@ -3346,6 +3367,19 @@ SDValue DAGTypeLegalizer::SoftPromoteHalfOp_STORE(SDNode *N, unsigned OpNo) {
                       ST->getMemOperand());
 }
 
+SDValue DAGTypeLegalizer::SoftPromoteHalfOp_ATOMIC_STORE(SDNode *N,
+                                                         unsigned OpNo) {
+  assert(OpNo == 1 && "Can only soften the stored value!");
+  AtomicSDNode *ST = cast<AtomicSDNode>(N);
+  SDValue Val = ST->getVal();
+  SDLoc dl(N);
+
+  SDValue Promoted = GetSoftPromotedHalf(Val);
+  return DAG.getAtomic(ISD::ATOMIC_STORE, dl, Promoted.getValueType(),
+                       ST->getChain(), Promoted, ST->getBasePtr(),
+                       ST->getMemOperand());
+}
+
 SDValue DAGTypeLegalizer::SoftPromoteHalfOp_STACKMAP(SDNode *N, unsigned OpNo) {
   assert(OpNo > 1); // Because the first two arguments are guaranteed legal.
   SmallVector<SDValue> NewOps(N->ops().begin(), N->ops().end());
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
index 49be824deb5134..a535edb6523ba8 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h
@@ -710,6 +710,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue PromoteFloatOp_UnaryOp(SDNode *N, unsigned OpNo);
   SDValue PromoteFloatOp_FP_TO_XINT_SAT(SDNode *N, unsigned OpNo);
   SDValue PromoteFloatOp_STORE(SDNode *N, unsigned OpNo);
+  SDValue PromoteFloatOp_ATOMIC_STORE(SDNode *N, unsigned OpNo);
   SDValue PromoteFloatOp_SELECT_CC(SDNode *N, unsigned OpNo);
   SDValue PromoteFloatOp_SETCC(SDNode *N, unsigned OpNo);
 
@@ -754,6 +755,7 @@ class LLVM_LIBRARY_VISIBILITY DAGTypeLegalizer {
   SDValue SoftPromoteHalfOp_SETCC(SDNode *N);
   SDValue SoftPromoteHalfOp_SELECT_CC(SDNode *N, unsigned OpNo);
   SDValue SoftPromoteHalfOp_STORE(SDNode *N, unsigned OpNo);
+  SDValue SoftPromoteHalfOp_ATOMIC_STORE(SDNode *N, unsigned OpNo);
   SDValue SoftPromoteHalfOp_STACKMAP(SDNode *N, unsigned OpNo);
   SDValue SoftPromoteHalfOp_PATCHPOINT(SDNode *N, unsigned OpNo);
 
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 3124fb23fb0be7..0a26e20c7358d5 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -161,6 +161,18 @@ AMDGPUTargetLowering::AMDGPUTargetLowering(const TargetMachine &TM,
   setOperationAction(ISD::ATOMIC_LOAD, MVT::bf16, Promote);
   AddPromotedToType(ISD::ATOMIC_LOAD, MVT::bf16, MVT::i16);
 
+  setOperationAction(ISD::ATOMIC_STORE, MVT::f32, Promote);
+  AddPromotedToType(ISD::ATOMIC_STORE, MVT::f32, MVT::i32);
+
+  setOperationAction(ISD::ATOMIC_STORE, MVT::f64, Promote);
+  AddPromotedToType(ISD::ATOMIC_STORE, MVT::f64, MVT::i64);
+
+  setOperationAction(ISD::ATOMIC_STORE, MVT::f16, Promote);
+  AddPromotedToType(ISD::ATOMIC_STORE, MVT::f16, MVT::i16);
+
+  setOperationAction(ISD::ATOMIC_STORE, MVT::bf16, Promote);
+  AddPromotedToType(ISD::ATOMIC_STORE, MVT::bf16, MVT::i16);
+
   // There are no 64-bit extloads. These should be done as a 32-bit extload and
   // an extension to 64-bit.
   for (MVT VT : MVT::integer_valuetypes())
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 269c414521dbcc..8971b6c76ee38b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -236,6 +236,10 @@ class AMDGPUTargetLowering : public TargetLowering {
     return AtomicExpansionKind::None;
   }
 
+  AtomicExpansionKind shouldCastAtomicStoreInIR(StoreInst *SI) const override {
+    return AtomicExpansionKind::None;
+  }
+
   static CCAssignFn *CCAssignFnForCall(CallingConv::ID CC, bool IsVarArg);
   static CCAssignFn *CCAssignFnForReturn(CallingConv::ID CC, bool IsVarArg);
 
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-store.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-store.ll
index db0c3a20e62f48..9159393ab887e2 100644
--- a/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-store.ll
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-store.ll
@@ -4,8 +4,7 @@
 define void @store_atomic_f32_global_system(float %val, ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f32_global_system(
 ; CHECK-SAME: float [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL]] to i32
-; CHECK-NEXT:    store atomic i32 [[TMP1]], ptr addrspace(1) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    store atomic float [[VAL]], ptr addrspace(1) [[PTR]] seq_cst, align 4, !some.unknown.md [[META0:![0-9]+]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic float %val, ptr addrspace(1) %ptr seq_cst, align 4, !some.unknown.md !0
@@ -15,8 +14,7 @@ define void @store_atomic_f32_global_system(float %val, ptr addrspace(1) %ptr) {
 define void @store_atomic_f32_global_agent(float %val, ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f32_global_agent(
 ; CHECK-SAME: float [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL]] to i32
-; CHECK-NEXT:    store atomic i32 [[TMP1]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT:    store atomic float [[VAL]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic float %val, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 4, !some.unknown.md !0
@@ -26,8 +24,7 @@ define void @store_atomic_f32_global_agent(float %val, ptr addrspace(1) %ptr) {
 define void @store_atomic_f32_local(float %val, ptr addrspace(3) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f32_local(
 ; CHECK-SAME: float [[VAL:%.*]], ptr addrspace(3) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL]] to i32
-; CHECK-NEXT:    store atomic i32 [[TMP1]], ptr addrspace(3) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    store atomic float [[VAL]], ptr addrspace(3) [[PTR]] seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic float %val, ptr addrspace(3) %ptr seq_cst, align 4, !some.unknown.md !0
@@ -37,8 +34,7 @@ define void @store_atomic_f32_local(float %val, ptr addrspace(3) %ptr) {
 define void @store_atomic_f32_flat(float %val, ptr %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f32_flat(
 ; CHECK-SAME: float [[VAL:%.*]], ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL]] to i32
-; CHECK-NEXT:    store atomic i32 [[TMP1]], ptr [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    store atomic float [[VAL]], ptr [[PTR]] seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic float %val, ptr %ptr seq_cst, align 4, !some.unknown.md !0
@@ -48,8 +44,7 @@ define void @store_atomic_f32_flat(float %val, ptr %ptr) {
 define void @store_atomic_f16_global_system(half %val, ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f16_global_system(
 ; CHECK-SAME: half [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL]] to i16
-; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(1) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    store atomic half [[VAL]], ptr addrspace(1) [[PTR]] seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic half %val, ptr addrspace(1) %ptr seq_cst, align 4, !some.unknown.md !0
@@ -59,8 +54,7 @@ define void @store_atomic_f16_global_system(half %val, ptr addrspace(1) %ptr) {
 define void @store_atomic_f16_global_agent(half %val, ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f16_global_agent(
 ; CHECK-SAME: half [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL]] to i16
-; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT:    store atomic half [[VAL]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic half %val, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 4, !some.unknown.md !0
@@ -70,8 +64,7 @@ define void @store_atomic_f16_global_agent(half %val, ptr addrspace(1) %ptr) {
 define void @store_atomic_f16_local(half %val, ptr addrspace(3) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f16_local(
 ; CHECK-SAME: half [[VAL:%.*]], ptr addrspace(3) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL]] to i16
-; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(3) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    store atomic half [[VAL]], ptr addrspace(3) [[PTR]] seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic half %val, ptr addrspace(3) %ptr seq_cst, align 4, !some.unknown.md !0
@@ -81,8 +74,7 @@ define void @store_atomic_f16_local(half %val, ptr addrspace(3) %ptr) {
 define void @store_atomic_f16_flat(half %val, ptr %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f16_flat(
 ; CHECK-SAME: half [[VAL:%.*]], ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL]] to i16
-; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    store atomic half [[VAL]], ptr [[PTR]] seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic half %val, ptr %ptr seq_cst, align 4, !some.unknown.md !0
@@ -92,8 +84,7 @@ define void @store_atomic_f16_flat(half %val, ptr %ptr) {
 define void @store_atomic_bf16_global_system(bfloat %val, ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_bf16_global_system(
 ; CHECK-SAME: bfloat [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast bfloat [[VAL]] to i16
-; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(1) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    store atomic bfloat [[VAL]], ptr addrspace(1) [[PTR]] seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic bfloat %val, ptr addrspace(1) %ptr seq_cst, align 4, !some.unknown.md !0
@@ -103,8 +94,7 @@ define void @store_atomic_bf16_global_system(bfloat %val, ptr addrspace(1) %ptr)
 define void @store_atomic_bf16_global_agent(bfloat %val, ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_bf16_global_agent(
 ; CHECK-SAME: bfloat [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast bfloat [[VAL]] to i16
-; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT:    store atomic bfloat [[VAL]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic bfloat %val, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 4, !some.unknown.md !0
@@ -114,8 +104,7 @@ define void @store_atomic_bf16_global_agent(bfloat %val, ptr addrspace(1) %ptr)
 define void @store_atomic_bf16_local(bfloat %val, ptr addrspace(3) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_bf16_local(
 ; CHECK-SAME: bfloat [[VAL:%.*]], ptr addrspace(3) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast bfloat [[VAL]] to i16
-; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(3) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    store atomic bfloat [[VAL]], ptr addrspace(3) [[PTR]] seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic bfloat %val, ptr addrspace(3) %ptr seq_cst, align 4, !some.unknown.md !0
@@ -125,8 +114,7 @@ define void @store_atomic_bf16_local(bfloat %val, ptr addrspace(3) %ptr) {
 define void @store_atomic_bf16_flat(bfloat %val, ptr %ptr) {
 ; CHECK-LABEL: define void @store_atomic_bf16_flat(
 ; CHECK-SAME: bfloat [[VAL:%.*]], ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast bfloat [[VAL]] to i16
-; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    store atomic bfloat [[VAL]], ptr [[PTR]] seq_cst, align 4, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic bfloat %val, ptr %ptr seq_cst, align 4, !some.unknown.md !0
@@ -135,8 +123,7 @@ define void @store_atomic_bf16_flat(bfloat %val, ptr %ptr) {
 define void @store_atomic_f64_global_system(double %val, ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f64_global_system(
 ; CHECK-SAME: double [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL]] to i64
-; CHECK-NEXT:    store atomic i64 [[TMP1]], ptr addrspace(1) [[PTR]] seq_cst, align 8
+; CHECK-NEXT:    store atomic double [[VAL]], ptr addrspace(1) [[PTR]] seq_cst, align 8, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic double %val, ptr addrspace(1) %ptr seq_cst, align 8, !some.unknown.md !0
@@ -146,8 +133,7 @@ define void @store_atomic_f64_global_system(double %val, ptr addrspace(1) %ptr)
 define void @store_atomic_f64_global_agent(double %val, ptr addrspace(1) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f64_global_agent(
 ; CHECK-SAME: double [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL]] to i64
-; CHECK-NEXT:    store atomic i64 [[TMP1]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 8
+; CHECK-NEXT:    store atomic double [[VAL]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 8, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic double %val, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 8, !some.unknown.md !0
@@ -157,8 +143,7 @@ define void @store_atomic_f64_global_agent(double %val, ptr addrspace(1) %ptr) {
 define void @store_atomic_f64_local(double %val, ptr addrspace(3) %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f64_local(
 ; CHECK-SAME: double [[VAL:%.*]], ptr addrspace(3) [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL]] to i64
-; CHECK-NEXT:    store atomic i64 [[TMP1]], ptr addrspace(3) [[PTR]] seq_cst, align 8
+; CHECK-NEXT:    store atomic double [[VAL]], ptr addrspace(3) [[PTR]] seq_cst, align 8, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic double %val, ptr addrspace(3) %ptr seq_cst, align 8, !some.unknown.md !0
@@ -168,8 +153,7 @@ define void @store_atomic_f64_local(double %val, ptr addrspace(3) %ptr) {
 define void @store_atomic_f64_flat(double %val, ptr %ptr) {
 ; CHECK-LABEL: define void @store_atomic_f64_flat(
 ; CHECK-SAME: double [[VAL:%.*]], ptr [[PTR:%.*]]) {
-; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL]] to i64
-; CHECK-NEXT:    store atomic i64 [[TMP1]], ptr [[PTR]] seq_cst, align 8
+; CHECK-NEXT:    store atomic double [[VAL]], ptr [[PTR]] seq_cst, align 8, !some.unknown.md [[META0]]
 ; CHECK-NEXT:    ret void
 ;
   store atomic double %val, ptr %ptr seq_cst, align 8, !some.unknown.md !0
@@ -177,3 +161,6 @@ define void @store_atomic_f64_flat(double %val, ptr %ptr) {
 }
 
 !0 = !{}
+;.
+; CHECK: [[META0]] = !{}
+;.


