[llvm] [NVPTX] Add Volta Atomic SequentiallyConsistent Load and Store Operations (PR #98551)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 31 12:24:57 PDT 2024
================
@@ -785,11 +1290,77 @@ define void @local_plain(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace
; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
store double %f.add, ptr addrspace(5) %c
+ ; CHECK: ld.local.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %h.load = load <2 x i8>, ptr addrspace(5) %b
+ %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+ ; CHECK: st.local.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <2 x i8> %h.add, ptr addrspace(5) %b
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %i.load = load <4 x i8>, ptr addrspace(5) %c
+ %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <4 x i8> %i.add, ptr addrspace(5) %c
+
+ ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+ %j.load = load <2 x i16>, ptr addrspace(5) %c
+ %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+ ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+ store <2 x i16> %j.add, ptr addrspace(5) %c
+
+ ; CHECK: ld.local.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %k.load = load <4 x i16>, ptr addrspace(5) %d
+ %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+ ; CHECK: st.local.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+ store <4 x i16> %k.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %l.load = load <2 x i32>, ptr addrspace(5) %d
+ %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+ ; CHECK: st.local.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <2 x i32> %l.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %m.load = load <4 x i32>, ptr addrspace(5) %d
+ %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+ ; CHECK: st.local.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+ store <4 x i32> %m.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %n.load = load <2 x i64>, ptr addrspace(5) %d
+ %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+ ; CHECK: st.local.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+ store <2 x i64> %n.add, ptr addrspace(5) %d
+
+ ; Note: per https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#vectors
+ ; vectors cannot exceed 128 bits in length, i.e., .v4.u64 is not allowed.
+
+ ; CHECK: ld.local.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %o.load = load <2 x float>, ptr addrspace(5) %d
+ %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+ ; CHECK: st.local.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <2 x float> %o.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %p.load = load <4 x float>, ptr addrspace(5) %d
+ %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+ ; CHECK: st.local.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+ store <4 x float> %p.add, ptr addrspace(5) %d
+
+ ; CHECK: ld.local.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+ %q.load = load <2 x double>, ptr addrspace(5) %d
+ %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+ ; CHECK: st.local.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+ store <2 x double> %q.add, ptr addrspace(5) %d
+
ret void
}
; CHECK-LABEL: local_volatile
define void @local_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d) local_unnamed_addr {
+ ; TODO: generate PTX that preserves Concurrent Forward Progress
+ ; by using volatile operations.
----------------
gonzalobg wrote:
It means that we are currently producing invalid PTX in some corner cases that very few people run into.
There are a few ways to generate correct PTX here, but the gist of it is that we need to preserve volatile or atomic in the PTX; we can't just drop it and issue weak ops.
One option is, e.g., to generate precisely the code we generate right now, but with an additional volatile/atomic load from some dummy memory location whose result is unused (i.e., dead).
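For illustration, a rough PTX sketch of that option (the `__dummy` symbol, register numbers, and exact instruction forms are illustrative assumptions, not what the compiler emits today):

    // Existing codegen: the volatile local access is lowered to a weak op.
    ld.local.u32 %r1, [%rd1];
    // Sketch: additionally issue a volatile load from a dummy location.
    // Its result %r2 is dead, but a volatile access survives into the PTX,
    // which is what preserves Concurrent Forward Progress.
    mov.u64 %rd2, __dummy;               // __dummy: some dummy .global variable
    ld.volatile.global.u32 %r2, [%rd2];  // dead load; result intentionally unused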
I'll work on this eventually, but we'll need to add a few more tests (for .const and .param) first, because those suffer from a similar issue, and when we fix this we should make sure we fix it throughout.
https://github.com/llvm/llvm-project/pull/98551