[llvm] [NVPTX] Lower LLVM masked vector loads and stores to PTX (PR #159387)
Drew Kersnar via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 21 10:02:05 PDT 2025
================
@@ -0,0 +1,366 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_90 | FileCheck %s -check-prefixes=CHECK,SM90
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_90 | %ptxas-verify -arch=sm_90 %}
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_100 -mattr=+ptx88 | FileCheck %s -check-prefixes=CHECK,SM100
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_100 -mattr=+ptx88 | %ptxas-verify -arch=sm_100 %}
+
+
+; Different architectures are tested in this file for the following reasons:
+; - SM90 does not have 256-bit load/store instructions
+; - SM90 does not have masked store instructions
+; - SM90 does not support packed f32x2 instructions
+
+define void @global_8xi32(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_8xi32(
+; SM90: {
+; SM90-NEXT: .reg .b32 %r<9>;
+; SM90-NEXT: .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT: // %bb.0:
+; SM90-NEXT: ld.param.b64 %rd1, [global_8xi32_param_0];
+; SM90-NEXT: .pragma "used_bytes_mask 61440";
+; SM90-NEXT: ld.global.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; SM90-NEXT: .pragma "used_bytes_mask 3855";
+; SM90-NEXT: ld.global.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; SM90-NEXT: ld.param.b64 %rd2, [global_8xi32_param_1];
+; SM90-NEXT: st.global.b32 [%rd2], %r5;
+; SM90-NEXT: st.global.b32 [%rd2+8], %r7;
+; SM90-NEXT: st.global.b32 [%rd2+28], %r4;
+; SM90-NEXT: ret;
+;
+; SM100-LABEL: global_8xi32(
+; SM100: {
+; SM100-NEXT: .reg .b32 %r<9>;
+; SM100-NEXT: .reg .b64 %rd<3>;
+; SM100-EMPTY:
+; SM100-NEXT: // %bb.0:
+; SM100-NEXT: ld.param.b64 %rd1, [global_8xi32_param_0];
+; SM100-NEXT: .pragma "used_bytes_mask 4026535695";
+; SM100-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT: ld.param.b64 %rd2, [global_8xi32_param_1];
+; SM100-NEXT: st.global.v8.b32 [%rd2], {%r1, _, %r3, _, _, _, _, %r8};
+; SM100-NEXT: ret;
+ %a.load = tail call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) %a, i32 32, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> poison)
+ tail call void @llvm.masked.store.v8i32.p1(<8 x i32> %a.load, ptr addrspace(1) %b, i32 32, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+ ret void
+}
+
+; Masked stores are only supported for 32-bit element types,
+; while masked loads are supported for all element types.
+define void @global_16xi16(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_16xi16(
+; SM90: {
+; SM90-NEXT: .reg .b16 %rs<7>;
+; SM90-NEXT: .reg .b32 %r<9>;
+; SM90-NEXT: .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT: // %bb.0:
+; SM90-NEXT: ld.param.b64 %rd1, [global_16xi16_param_0];
+; SM90-NEXT: .pragma "used_bytes_mask 61440";
+; SM90-NEXT: ld.global.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; SM90-NEXT: mov.b32 {%rs1, %rs2}, %r4;
+; SM90-NEXT: .pragma "used_bytes_mask 3855";
+; SM90-NEXT: ld.global.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; SM90-NEXT: mov.b32 {%rs3, %rs4}, %r7;
+; SM90-NEXT: mov.b32 {%rs5, %rs6}, %r5;
+; SM90-NEXT: ld.param.b64 %rd2, [global_16xi16_param_1];
+; SM90-NEXT: st.global.b16 [%rd2], %rs5;
+; SM90-NEXT: st.global.b16 [%rd2+2], %rs6;
+; SM90-NEXT: st.global.b16 [%rd2+8], %rs3;
+; SM90-NEXT: st.global.b16 [%rd2+10], %rs4;
+; SM90-NEXT: st.global.b16 [%rd2+28], %rs1;
+; SM90-NEXT: st.global.b16 [%rd2+30], %rs2;
+; SM90-NEXT: ret;
+;
+; SM100-LABEL: global_16xi16(
+; SM100: {
+; SM100-NEXT: .reg .b16 %rs<7>;
+; SM100-NEXT: .reg .b32 %r<9>;
+; SM100-NEXT: .reg .b64 %rd<3>;
+; SM100-EMPTY:
+; SM100-NEXT: // %bb.0:
+; SM100-NEXT: ld.param.b64 %rd1, [global_16xi16_param_0];
+; SM100-NEXT: .pragma "used_bytes_mask 4026535695";
+; SM100-NEXT: ld.global.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT: mov.b32 {%rs1, %rs2}, %r8;
+; SM100-NEXT: mov.b32 {%rs3, %rs4}, %r3;
+; SM100-NEXT: mov.b32 {%rs5, %rs6}, %r1;
+; SM100-NEXT: ld.param.b64 %rd2, [global_16xi16_param_1];
+; SM100-NEXT: st.global.b16 [%rd2], %rs5;
+; SM100-NEXT: st.global.b16 [%rd2+2], %rs6;
+; SM100-NEXT: st.global.b16 [%rd2+8], %rs3;
+; SM100-NEXT: st.global.b16 [%rd2+10], %rs4;
+; SM100-NEXT: st.global.b16 [%rd2+28], %rs1;
+; SM100-NEXT: st.global.b16 [%rd2+30], %rs2;
+; SM100-NEXT: ret;
+ %a.load = tail call <16 x i16> @llvm.masked.load.v16i16.p1(ptr addrspace(1) %a, i32 32, <16 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>, <16 x i16> poison)
+ tail call void @llvm.masked.store.v16i16.p1(<16 x i16> %a.load, ptr addrspace(1) %b, i32 32, <16 x i1> <i1 true, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true, i1 true>)
+ ret void
+}
+
+define void @global_8xi32_no_align(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_8xi32_no_align(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<4>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [global_8xi32_no_align_param_0];
+; CHECK-NEXT: ld.global.b32 %r1, [%rd1];
+; CHECK-NEXT: ld.param.b64 %rd2, [global_8xi32_no_align_param_1];
+; CHECK-NEXT: ld.global.b32 %r2, [%rd1+8];
+; CHECK-NEXT: ld.global.b32 %r3, [%rd1+28];
+; CHECK-NEXT: st.global.b32 [%rd2], %r1;
+; CHECK-NEXT: st.global.b32 [%rd2+8], %r2;
+; CHECK-NEXT: st.global.b32 [%rd2+28], %r3;
+; CHECK-NEXT: ret;
+ %a.load = tail call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) %a, i32 16, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> poison)
+ tail call void @llvm.masked.store.v8i32.p1(<8 x i32> %a.load, ptr addrspace(1) %b, i32 16, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+ ret void
+}
+
+
+define void @global_8xi32_invariant(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; SM90-LABEL: global_8xi32_invariant(
+; SM90: {
+; SM90-NEXT: .reg .b32 %r<9>;
+; SM90-NEXT: .reg .b64 %rd<3>;
+; SM90-EMPTY:
+; SM90-NEXT: // %bb.0:
+; SM90-NEXT: ld.param.b64 %rd1, [global_8xi32_invariant_param_0];
+; SM90-NEXT: .pragma "used_bytes_mask 61440";
+; SM90-NEXT: ld.global.nc.v4.b32 {%r1, %r2, %r3, %r4}, [%rd1+16];
+; SM90-NEXT: .pragma "used_bytes_mask 3855";
+; SM90-NEXT: ld.global.nc.v4.b32 {%r5, %r6, %r7, %r8}, [%rd1];
+; SM90-NEXT: ld.param.b64 %rd2, [global_8xi32_invariant_param_1];
+; SM90-NEXT: st.global.b32 [%rd2], %r5;
+; SM90-NEXT: st.global.b32 [%rd2+8], %r7;
+; SM90-NEXT: st.global.b32 [%rd2+28], %r4;
+; SM90-NEXT: ret;
+;
+; SM100-LABEL: global_8xi32_invariant(
+; SM100: {
+; SM100-NEXT: .reg .b32 %r<9>;
+; SM100-NEXT: .reg .b64 %rd<3>;
+; SM100-EMPTY:
+; SM100-NEXT: // %bb.0:
+; SM100-NEXT: ld.param.b64 %rd1, [global_8xi32_invariant_param_0];
+; SM100-NEXT: .pragma "used_bytes_mask 4026535695";
+; SM100-NEXT: ld.global.nc.v8.b32 {%r1, %r2, %r3, %r4, %r5, %r6, %r7, %r8}, [%rd1];
+; SM100-NEXT: ld.param.b64 %rd2, [global_8xi32_invariant_param_1];
+; SM100-NEXT: st.global.v8.b32 [%rd2], {%r1, _, %r3, _, _, _, _, %r8};
+; SM100-NEXT: ret;
+ %a.load = tail call <8 x i32> @llvm.masked.load.v8i32.p1(ptr addrspace(1) %a, i32 32, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>, <8 x i32> poison), !invariant.load !0
+ tail call void @llvm.masked.store.v8i32.p1(<8 x i32> %a.load, ptr addrspace(1) %b, i32 32, <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 false, i1 true>)
+ ret void
+}
+
+define void @global_2xi16(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_2xi16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [global_2xi16_param_0];
+; CHECK-NEXT: .pragma "used_bytes_mask 3";
+; CHECK-NEXT: ld.global.b32 %r1, [%rd1];
+; CHECK-NEXT: ld.param.b64 %rd2, [global_2xi16_param_1];
+; CHECK-NEXT: mov.b32 {%rs1, _}, %r1;
+; CHECK-NEXT: st.global.b16 [%rd2], %rs1;
+; CHECK-NEXT: ret;
+ %a.load = tail call <2 x i16> @llvm.masked.load.v2i16.p1(ptr addrspace(1) %a, i32 4, <2 x i1> <i1 true, i1 false>, <2 x i16> poison)
+ tail call void @llvm.masked.store.v2i16.p1(<2 x i16> %a.load, ptr addrspace(1) %b, i32 4, <2 x i1> <i1 true, i1 false>)
+ ret void
+}
+
+define void @global_2xi16_invariant(ptr addrspace(1) %a, ptr addrspace(1) %b) {
+; CHECK-LABEL: global_2xi16_invariant(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<2>;
+; CHECK-NEXT: .reg .b32 %r<2>;
+; CHECK-NEXT: .reg .b64 %rd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.b64 %rd1, [global_2xi16_invariant_param_0];
+; CHECK-NEXT: .pragma "used_bytes_mask 3";
+; CHECK-NEXT: ld.global.nc.b32 %r1, [%rd1];
----------------
dakersnar wrote:
I think realistically we shouldn't end up with a masked load that looks like:
` %a.load = tail call <2 x i16> @llvm.masked.load.v2i16.p1(ptr addrspace(1) %a, i32 4, <2 x i1> <i1 true, i1 false>, <2 x i16> poison), !invariant.load !0`
I just wanted to check the edge-case handling for the legal PTX packed types. If you think it would be good to optimize this away, that's fine with me, but I suspect this isn't a real case we expect to see.
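For reference, the edge case in question would look something like the sketch below, mirroring the `@global_2xi16` test above but with the load marked invariant (the function name is illustrative and the intrinsic declarations/metadata node are written out here just to make the snippet self-contained):

```llvm
; Minimal sketch: an invariant masked load of a packed <2 x i16>,
; with only the first lane used, followed by a masked store of it.
define void @global_2xi16_invariant_sketch(ptr addrspace(1) %a, ptr addrspace(1) %b) {
  %v = tail call <2 x i16> @llvm.masked.load.v2i16.p1(ptr addrspace(1) %a, i32 4, <2 x i1> <i1 true, i1 false>, <2 x i16> poison), !invariant.load !0
  tail call void @llvm.masked.store.v2i16.p1(<2 x i16> %v, ptr addrspace(1) %b, i32 4, <2 x i1> <i1 true, i1 false>)
  ret void
}

declare <2 x i16> @llvm.masked.load.v2i16.p1(ptr addrspace(1), i32 immarg, <2 x i1>, <2 x i16>)
declare void @llvm.masked.store.v2i16.p1(<2 x i16>, ptr addrspace(1), i32 immarg, <2 x i1>)

!0 = !{}
```

Per the CHECK lines above, this lowers to a single 32-bit `ld.global.nc` guarded by a `used_bytes_mask` pragma.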
https://github.com/llvm/llvm-project/pull/159387