[llvm] 76a3be7 - AMDGPU: Add baseline tests for bad bitcasting of atomic load/store

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 25 07:08:28 PDT 2024


Author: Matt Arsenault
Date: 2024-04-25T16:08:11+02:00
New Revision: 76a3be7c766bd55221c3d0d0a74c42f82c5d76ed

URL: https://github.com/llvm/llvm-project/commit/76a3be7c766bd55221c3d0d0a74c42f82c5d76ed
DIFF: https://github.com/llvm/llvm-project/commit/76a3be7c766bd55221c3d0d0a74c42f82c5d76ed.diff

LOG: AMDGPU: Add baseline tests for bad bitcasting of atomic load/store

Added: 
    llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-load.ll
    llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-store.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-load.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-load.ll
new file mode 100644
index 00000000000000..fd5a2044db48f3
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-load.ll
@@ -0,0 +1,195 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=atomic-expand %s | FileCheck %s
+
+; Make sure atomic loads are not bitcasted and lose metadata
+
+define float @load_atomic_f32_global_system(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define float @load_atomic_f32_global_system(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr addrspace(1) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT:    ret float [[TMP2]]
+;
+  %ld = load atomic float, ptr addrspace(1) %ptr seq_cst, align 4, !some.unknown.md !0
+  ret float %ld
+}
+
+define float @load_atomic_f32_global_agent(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define float @load_atomic_f32_global_agent(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT:    ret float [[TMP2]]
+;
+  %ld = load atomic float, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 4, !some.unknown.md !0
+  ret float %ld
+}
+
+define float @load_atomic_f32_local(ptr addrspace(3) %ptr) {
+; CHECK-LABEL: define float @load_atomic_f32_local(
+; CHECK-SAME: ptr addrspace(3) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr addrspace(3) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT:    ret float [[TMP2]]
+;
+  %ld = load atomic float, ptr addrspace(3) %ptr seq_cst, align 4, !some.unknown.md !0
+  ret float %ld
+}
+
+define float @load_atomic_f32_flat_system(ptr %ptr) {
+; CHECK-LABEL: define float @load_atomic_f32_flat_system(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT:    ret float [[TMP2]]
+;
+  %ld = load atomic float, ptr %ptr seq_cst, align 4, !some.unknown.md !0
+  ret float %ld
+}
+
+define float @load_atomic_f32_flat_agent(ptr %ptr) {
+; CHECK-LABEL: define float @load_atomic_f32_flat_agent(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i32, ptr [[PTR]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i32 [[TMP1]] to float
+; CHECK-NEXT:    ret float [[TMP2]]
+;
+  %ld = load atomic float, ptr %ptr syncscope("agent") seq_cst, align 4, !some.unknown.md !0
+  ret float %ld
+}
+
+define half @load_atomic_f16_global_system(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define half @load_atomic_f16_global_system(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i16, ptr addrspace(1) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16 [[TMP1]] to half
+; CHECK-NEXT:    ret half [[TMP2]]
+;
+  %ld = load atomic half, ptr addrspace(1) %ptr seq_cst, align 4, !some.unknown.md !0
+  ret half %ld
+}
+
+define half @load_atomic_f16_global_agent(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define half @load_atomic_f16_global_agent(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i16, ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16 [[TMP1]] to half
+; CHECK-NEXT:    ret half [[TMP2]]
+;
+  %ld = load atomic half, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 4, !some.unknown.md !0
+  ret half %ld
+}
+
+define half @load_atomic_f16_local(ptr addrspace(3) %ptr) {
+; CHECK-LABEL: define half @load_atomic_f16_local(
+; CHECK-SAME: ptr addrspace(3) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i16, ptr addrspace(3) [[PTR]] seq_cst, align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16 [[TMP1]] to half
+; CHECK-NEXT:    ret half [[TMP2]]
+;
+  %ld = load atomic half, ptr addrspace(3) %ptr seq_cst, align 2, !some.unknown.md !0
+  ret half %ld
+}
+
+define bfloat @load_atomic_bf16_global_system(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define bfloat @load_atomic_bf16_global_system(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i16, ptr addrspace(1) [[PTR]] seq_cst, align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16 [[TMP1]] to bfloat
+; CHECK-NEXT:    ret bfloat [[TMP2]]
+;
+  %ld = load atomic bfloat, ptr addrspace(1) %ptr seq_cst, align 2, !some.unknown.md !0
+  ret bfloat %ld
+}
+
+define bfloat @load_atomic_bf16_global_agent(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define bfloat @load_atomic_bf16_global_agent(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i16, ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16 [[TMP1]] to bfloat
+; CHECK-NEXT:    ret bfloat [[TMP2]]
+;
+  %ld = load atomic bfloat, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 2, !some.unknown.md !0
+  ret bfloat %ld
+}
+
+define bfloat @load_atomic_bf16_local(ptr addrspace(3) %ptr) {
+; CHECK-LABEL: define bfloat @load_atomic_bf16_local(
+; CHECK-SAME: ptr addrspace(3) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i16, ptr addrspace(3) [[PTR]] seq_cst, align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16 [[TMP1]] to bfloat
+; CHECK-NEXT:    ret bfloat [[TMP2]]
+;
+  %ld = load atomic bfloat, ptr addrspace(3) %ptr seq_cst, align 2, !some.unknown.md !0
+  ret bfloat %ld
+}
+
+define bfloat @load_atomic_bf16_flat(ptr %ptr) {
+; CHECK-LABEL: define bfloat @load_atomic_bf16_flat(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i16, ptr [[PTR]] seq_cst, align 2
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i16 [[TMP1]] to bfloat
+; CHECK-NEXT:    ret bfloat [[TMP2]]
+;
+  %ld = load atomic bfloat, ptr %ptr seq_cst, align 2, !some.unknown.md !0
+  ret bfloat %ld
+}
+
+define double @load_atomic_f64_global_system(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define double @load_atomic_f64_global_system(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i64, ptr addrspace(1) [[PTR]] seq_cst, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to double
+; CHECK-NEXT:    ret double [[TMP2]]
+;
+  %ld = load atomic double, ptr addrspace(1) %ptr seq_cst, align 8, !some.unknown.md !0
+  ret double %ld
+}
+
+define double @load_atomic_f64_global_agent(ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define double @load_atomic_f64_global_agent(
+; CHECK-SAME: ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i64, ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to double
+; CHECK-NEXT:    ret double [[TMP2]]
+;
+  %ld = load atomic double, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 8, !some.unknown.md !0
+  ret double %ld
+}
+
+define double @load_atomic_f64_local(ptr addrspace(3) %ptr) {
+; CHECK-LABEL: define double @load_atomic_f64_local(
+; CHECK-SAME: ptr addrspace(3) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i64, ptr addrspace(3) [[PTR]] seq_cst, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to double
+; CHECK-NEXT:    ret double [[TMP2]]
+;
+  %ld = load atomic double, ptr addrspace(3) %ptr seq_cst, align 8, !some.unknown.md !0
+  ret double %ld
+}
+
+define double @load_atomic_f64_flat_system(ptr %ptr) {
+; CHECK-LABEL: define double @load_atomic_f64_flat_system(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i64, ptr [[PTR]] seq_cst, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to double
+; CHECK-NEXT:    ret double [[TMP2]]
+;
+  %ld = load atomic double, ptr %ptr seq_cst, align 8, !some.unknown.md !0
+  ret double %ld
+}
+
+define double @load_atomic_f64_flat_agent(ptr %ptr) {
+; CHECK-LABEL: define double @load_atomic_f64_flat_agent(
+; CHECK-SAME: ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i64, ptr [[PTR]] syncscope("agent") seq_cst, align 8
+; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i64 [[TMP1]] to double
+; CHECK-NEXT:    ret double [[TMP2]]
+;
+  %ld = load atomic double, ptr %ptr syncscope("agent") seq_cst, align 8, !some.unknown.md !0
+  ret double %ld
+}
+
+!0 = !{}
+
+

diff --git a/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-store.ll b/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-store.ll
new file mode 100644
index 00000000000000..db0c3a20e62f48
--- /dev/null
+++ b/llvm/test/Transforms/AtomicExpand/AMDGPU/no-expand-atomic-store.ll
@@ -0,0 +1,179 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -passes=atomic-expand %s | FileCheck %s
+
+define void @store_atomic_f32_global_system(float %val, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @store_atomic_f32_global_system(
+; CHECK-SAME: float [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL]] to i32
+; CHECK-NEXT:    store atomic i32 [[TMP1]], ptr addrspace(1) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic float %val, ptr addrspace(1) %ptr seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f32_global_agent(float %val, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @store_atomic_f32_global_agent(
+; CHECK-SAME: float [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL]] to i32
+; CHECK-NEXT:    store atomic i32 [[TMP1]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic float %val, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f32_local(float %val, ptr addrspace(3) %ptr) {
+; CHECK-LABEL: define void @store_atomic_f32_local(
+; CHECK-SAME: float [[VAL:%.*]], ptr addrspace(3) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL]] to i32
+; CHECK-NEXT:    store atomic i32 [[TMP1]], ptr addrspace(3) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic float %val, ptr addrspace(3) %ptr seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f32_flat(float %val, ptr %ptr) {
+; CHECK-LABEL: define void @store_atomic_f32_flat(
+; CHECK-SAME: float [[VAL:%.*]], ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast float [[VAL]] to i32
+; CHECK-NEXT:    store atomic i32 [[TMP1]], ptr [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic float %val, ptr %ptr seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f16_global_system(half %val, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @store_atomic_f16_global_system(
+; CHECK-SAME: half [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL]] to i16
+; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(1) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic half %val, ptr addrspace(1) %ptr seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f16_global_agent(half %val, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @store_atomic_f16_global_agent(
+; CHECK-SAME: half [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL]] to i16
+; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic half %val, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f16_local(half %val, ptr addrspace(3) %ptr) {
+; CHECK-LABEL: define void @store_atomic_f16_local(
+; CHECK-SAME: half [[VAL:%.*]], ptr addrspace(3) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL]] to i16
+; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(3) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic half %val, ptr addrspace(3) %ptr seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f16_flat(half %val, ptr %ptr) {
+; CHECK-LABEL: define void @store_atomic_f16_flat(
+; CHECK-SAME: half [[VAL:%.*]], ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast half [[VAL]] to i16
+; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic half %val, ptr %ptr seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_bf16_global_system(bfloat %val, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @store_atomic_bf16_global_system(
+; CHECK-SAME: bfloat [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast bfloat [[VAL]] to i16
+; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(1) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic bfloat %val, ptr addrspace(1) %ptr seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_bf16_global_agent(bfloat %val, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @store_atomic_bf16_global_agent(
+; CHECK-SAME: bfloat [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast bfloat [[VAL]] to i16
+; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic bfloat %val, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_bf16_local(bfloat %val, ptr addrspace(3) %ptr) {
+; CHECK-LABEL: define void @store_atomic_bf16_local(
+; CHECK-SAME: bfloat [[VAL:%.*]], ptr addrspace(3) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast bfloat [[VAL]] to i16
+; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr addrspace(3) [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic bfloat %val, ptr addrspace(3) %ptr seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_bf16_flat(bfloat %val, ptr %ptr) {
+; CHECK-LABEL: define void @store_atomic_bf16_flat(
+; CHECK-SAME: bfloat [[VAL:%.*]], ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast bfloat [[VAL]] to i16
+; CHECK-NEXT:    store atomic i16 [[TMP1]], ptr [[PTR]] seq_cst, align 4
+; CHECK-NEXT:    ret void
+;
+  store atomic bfloat %val, ptr %ptr seq_cst, align 4, !some.unknown.md !0
+  ret void
+}
+define void @store_atomic_f64_global_system(double %val, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @store_atomic_f64_global_system(
+; CHECK-SAME: double [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL]] to i64
+; CHECK-NEXT:    store atomic i64 [[TMP1]], ptr addrspace(1) [[PTR]] seq_cst, align 8
+; CHECK-NEXT:    ret void
+;
+  store atomic double %val, ptr addrspace(1) %ptr seq_cst, align 8, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f64_global_agent(double %val, ptr addrspace(1) %ptr) {
+; CHECK-LABEL: define void @store_atomic_f64_global_agent(
+; CHECK-SAME: double [[VAL:%.*]], ptr addrspace(1) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL]] to i64
+; CHECK-NEXT:    store atomic i64 [[TMP1]], ptr addrspace(1) [[PTR]] syncscope("agent") seq_cst, align 8
+; CHECK-NEXT:    ret void
+;
+  store atomic double %val, ptr addrspace(1) %ptr syncscope("agent") seq_cst, align 8, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f64_local(double %val, ptr addrspace(3) %ptr) {
+; CHECK-LABEL: define void @store_atomic_f64_local(
+; CHECK-SAME: double [[VAL:%.*]], ptr addrspace(3) [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL]] to i64
+; CHECK-NEXT:    store atomic i64 [[TMP1]], ptr addrspace(3) [[PTR]] seq_cst, align 8
+; CHECK-NEXT:    ret void
+;
+  store atomic double %val, ptr addrspace(3) %ptr seq_cst, align 8, !some.unknown.md !0
+  ret void
+}
+
+define void @store_atomic_f64_flat(double %val, ptr %ptr) {
+; CHECK-LABEL: define void @store_atomic_f64_flat(
+; CHECK-SAME: double [[VAL:%.*]], ptr [[PTR:%.*]]) {
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast double [[VAL]] to i64
+; CHECK-NEXT:    store atomic i64 [[TMP1]], ptr [[PTR]] seq_cst, align 8
+; CHECK-NEXT:    ret void
+;
+  store atomic double %val, ptr %ptr seq_cst, align 8, !some.unknown.md !0
+  ret void
+}
+
+!0 = !{}


        


More information about the llvm-commits mailing list