[llvm] [NVPTX] Add Volta Atomic SequentiallyConsistent Load and Store Operations (PR #98551)

Artem Belevich via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 25 15:42:43 PDT 2024


================
@@ -826,11 +1439,84 @@ define void @local_volatile(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrsp
   ; CHECK: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
   store volatile double %f.add, ptr addrspace(5) %c
 
+  ; TODO: should be combined into single .u16 op
+  ; CHECK: ld.local.v2.u8 {%rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+  %h.load = load volatile <2 x i8>, ptr addrspace(5) %b
+  %h.add = add <2 x i8> %h.load, <i8 1, i8 1>
+  ; CHECK: st.local.v2.u8 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}}
+  store volatile <2 x i8> %h.add, ptr addrspace(5) %b
+
+  ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+  %i.load = load volatile <4 x i8>, ptr addrspace(5) %c
+  %i.add = add <4 x i8> %i.load, <i8 1, i8 1, i8 1, i8 1>
+  ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+  store volatile <4 x i8> %i.add, ptr addrspace(5) %c
+
+  ; CHECK: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
+  %j.load = load volatile <2 x i16>, ptr addrspace(5) %c
+  %j.add = add <2 x i16> %j.load, <i16 1, i16 1>
+  ; CHECK: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
+  store volatile <2 x i16> %j.add, ptr addrspace(5) %c
+
+  ; TODO: should be combined into single .u64 op
+  ; CHECK: ld.local.v4.u16 {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}, [%rd{{[0-9]+}}]
+  %k.load = load volatile <4 x i16>, ptr addrspace(5) %d
+  %k.add = add <4 x i16> %k.load, <i16 1, i16 1, i16 1, i16 1>
+  ; CHECK: st.local.v4.u16 [%rd{{[0-9]+}}], {%rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}, %rs{{[0-9]+}}}
+  store volatile <4 x i16> %k.add, ptr addrspace(5) %d
+
+  ; TODO: should be combined into single .u64 op
+  ; CHECK: ld.local.v2.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+  %l.load = load volatile <2 x i32>, ptr addrspace(5) %d
+  %l.add = add <2 x i32> %l.load, <i32 1, i32 1>
+  ; CHECK: st.local.v2.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}}
+  store volatile <2 x i32> %l.add, ptr addrspace(5) %d
+
+  ; TODO: should be combined into single .b128 op in sm_70+
+  ; CHECK: ld.local.v4.u32 {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}, [%rd{{[0-9]+}}]
+  %m.load = load volatile <4 x i32>, ptr addrspace(5) %d
+  %m.add = add <4 x i32> %m.load, <i32 1, i32 1, i32 1, i32 1>
+  ; CHECK: st.local.v4.u32 [%rd{{[0-9]+}}], {%r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}, %r{{[0-9]+}}}
+  store volatile <4 x i32> %m.add, ptr addrspace(5) %d
+
+  ; TODO: should be combined into single .b128 op in sm_70+
+  ; CHECK: ld.local.v2.u64 {%rd{{[0-9]+}}, %rd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+  %n.load = load volatile <2 x i64>, ptr addrspace(5) %d
+  %n.add = add <2 x i64> %n.load, <i64 1, i64 1>
+  ; CHECK: st.local.v2.u64 [%rd{{[0-9]+}}], {%rd{{[0-9]+}}, %rd{{[0-9]+}}}
+  store volatile <2 x i64> %n.add, ptr addrspace(5) %d
+
+  ; Note: per https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#vectors
+  ; vectors cannot exceed 128-bit in length, i.e., .v4.u64 is not allowed.
+
+  ; TODO: should be combined into single .u64 op
+  ; CHECK: ld.local.v2.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+  %o.load = load volatile <2 x float>, ptr addrspace(5) %d
+  %o.add = fadd <2 x float> %o.load, <float 1., float 1.>
+  ; CHECK: st.local.v2.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}}
+  store volatile <2 x float> %o.add, ptr addrspace(5) %d
+
+  ; TODO: should be combined into single .b128 op in sm_70+
+  ; CHECK: ld.local.v4.f32 {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}, [%rd{{[0-9]+}}]
+  %p.load = load volatile <4 x float>, ptr addrspace(5) %d
+  %p.add = fadd <4 x float> %p.load, <float 1., float 1., float 1., float 1.>
+  ; CHECK: st.local.v4.f32 [%rd{{[0-9]+}}], {%f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}, %f{{[0-9]+}}}
+  store volatile <4 x float> %p.add, ptr addrspace(5) %d
+
+  ; TODO: should be combined into single .b128 op
+  ; CHECK: ld.local.v2.f64 {%fd{{[0-9]+}}, %fd{{[0-9]+}}}, [%rd{{[0-9]+}}]
+  %q.load = load volatile <2 x double>, ptr addrspace(5) %d
+  %q.add = fadd <2 x double> %q.load, <double 1., double 1.>
+  ; CHECK: st.local.v2.f64 [%rd{{[0-9]+}}], {%fd{{[0-9]+}}, %fd{{[0-9]+}}}
+  store volatile <2 x double> %q.add, ptr addrspace(5) %d
+
   ret void
 }
 
 ; CHECK-LABEL: local_monotonic
 define void @local_monotonic(ptr addrspace(5) %a, ptr addrspace(5) %b, ptr addrspace(5) %c, ptr addrspace(5) %d, ptr addrspace(5) %e) local_unnamed_addr {
+  ; TODO: this codegen loses concurrent forward progress
----------------
Artem-B wrote:

This comment needs more context. Can you elaborate on what you mean?

https://github.com/llvm/llvm-project/pull/98551


More information about the llvm-commits mailing list