[llvm] [DAGCombiner] Lower dynamic insertelt chain more efficiently (PR #162368)
Artem Belevich via llvm-commits
llvm-commits at lists.llvm.org
Tue Oct 14 11:09:49 PDT 2025
================
@@ -0,0 +1,360 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mcpu=sm_20 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -mcpu=sm_20 | %ptxas-verify %}
+target triple = "nvptx64-nvidia-cuda"
+
+; COM: Save the vector to the stack once.
+define ptx_kernel void @lower_once(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) local_unnamed_addr {
+; CHECK-LABEL: lower_once(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot0[64];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b64 %rd<39>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: mov.b64 %SPL, __local_depot0;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b64 %rd1, [lower_once_param_0];
+; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [lower_once_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [lower_once_param_1+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [lower_once_param_1+32];
+; CHECK-NEXT: ld.param.v2.b64 {%rd10, %rd11}, [lower_once_param_1+48];
+; CHECK-NEXT: ld.shared.b64 %rd12, [%rd1+16];
+; CHECK-NEXT: ld.shared.b64 %rd13, [%rd1+24];
+; CHECK-NEXT: ld.param.b32 %rd14, [lower_once_param_2];
+; CHECK-NEXT: and.b64 %rd15, %rd14, 7;
+; CHECK-NEXT: shl.b64 %rd16, %rd15, 3;
+; CHECK-NEXT: add.u64 %rd17, %SP, 0;
+; CHECK-NEXT: add.s64 %rd18, %rd17, %rd16;
+; CHECK-NEXT: ld.param.b32 %rd19, [lower_once_param_3];
+; CHECK-NEXT: and.b64 %rd20, %rd19, 7;
+; CHECK-NEXT: shl.b64 %rd21, %rd20, 3;
+; CHECK-NEXT: add.s64 %rd22, %rd17, %rd21;
+; CHECK-NEXT: ld.param.b32 %rd23, [lower_once_param_4];
+; CHECK-NEXT: and.b64 %rd24, %rd23, 7;
+; CHECK-NEXT: shl.b64 %rd25, %rd24, 3;
+; CHECK-NEXT: add.s64 %rd26, %rd17, %rd25;
+; CHECK-NEXT: st.b64 [%SP+56], %rd11;
+; CHECK-NEXT: st.b64 [%SP+48], %rd10;
+; CHECK-NEXT: st.b64 [%SP+40], %rd9;
+; CHECK-NEXT: st.b64 [%SP+32], %rd8;
+; CHECK-NEXT: st.b64 [%SP+24], %rd7;
+; CHECK-NEXT: st.b64 [%SP+16], %rd6;
+; CHECK-NEXT: st.b64 [%SP+8], %rd5;
+; CHECK-NEXT: st.b64 [%SP], %rd4;
+; CHECK-NEXT: st.b64 [%rd18], %rd2;
+; CHECK-NEXT: st.b64 [%rd22], %rd3;
+; CHECK-NEXT: st.b64 [%rd26], %rd12;
+; CHECK-NEXT: ld.param.b32 %rd27, [lower_once_param_5];
+; CHECK-NEXT: and.b64 %rd28, %rd27, 7;
+; CHECK-NEXT: shl.b64 %rd29, %rd28, 3;
+; CHECK-NEXT: add.s64 %rd30, %rd17, %rd29;
+; CHECK-NEXT: st.b64 [%rd30], %rd13;
+; CHECK-NEXT: ld.b64 %rd31, [%SP+8];
+; CHECK-NEXT: ld.b64 %rd32, [%SP];
+; CHECK-NEXT: ld.b64 %rd33, [%SP+24];
+; CHECK-NEXT: ld.b64 %rd34, [%SP+16];
+; CHECK-NEXT: ld.b64 %rd35, [%SP+40];
+; CHECK-NEXT: ld.b64 %rd36, [%SP+32];
+; CHECK-NEXT: ld.b64 %rd37, [%SP+56];
+; CHECK-NEXT: ld.b64 %rd38, [%SP+48];
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd38, %rd37};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd36, %rd35};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd34, %rd33};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd32, %rd31};
+; CHECK-NEXT: ret;
+entry:
+; COM: Load four scalar elements from shared memory to use as insert values.
+ %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
+ %element.0 = load double, ptr addrspace(3) %offset.0, align 64
+ %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
+ %element.1 = load double, ptr addrspace(3) %offset.1, align 8
+ %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
+ %element.2 = load double, ptr addrspace(3) %offset.2, align 8
+ %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
+ %element.3 = load double, ptr addrspace(3) %offset.3, align 8
+; COM: Single chain of four insertelements, each with a dynamic index. The
+; COM: expected lowering above spills %vector to one 64-byte stack slot
+; COM: (__local_depot0[64]) and performs four indexed stores into it, rather
+; COM: than one spill/reload round trip per insertelement.
+ %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 %idx0
+ %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 %idx1
+ %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 %idx2
+ %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx3
+; COM: Store the rebuilt vector back to shared memory so the chain has a use.
+ %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
+ store <8 x double> %vector.build3, ptr addrspace(3) %location, align 64
+ ret void
+}
+
+; COM: Save the vector to the stack twice. Because these are in two different
+; slots, the resulting sequences may be non-overlapping even though the
+; insertelt sequences overlap.
+define ptx_kernel void @lower_twice(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0, i32 %idx1, i32 %idx2, i32 %idx3) local_unnamed_addr {
+; CHECK-LABEL: lower_twice(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot1[128];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b64 %rd<51>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: mov.b64 %SPL, __local_depot1;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b64 %rd1, [lower_twice_param_0];
+; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [lower_twice_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [lower_twice_param_1+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [lower_twice_param_1+32];
+; CHECK-NEXT: ld.param.v2.b64 {%rd10, %rd11}, [lower_twice_param_1+48];
+; CHECK-NEXT: ld.shared.b64 %rd12, [%rd1+16];
+; CHECK-NEXT: ld.shared.b64 %rd13, [%rd1+24];
+; CHECK-NEXT: ld.param.b32 %rd14, [lower_twice_param_2];
+; CHECK-NEXT: and.b64 %rd15, %rd14, 7;
+; CHECK-NEXT: shl.b64 %rd16, %rd15, 3;
+; CHECK-NEXT: add.u64 %rd17, %SP, 0;
+; CHECK-NEXT: add.s64 %rd18, %rd17, %rd16;
+; CHECK-NEXT: add.u64 %rd19, %SP, 64;
+; CHECK-NEXT: add.s64 %rd20, %rd19, %rd16;
+; CHECK-NEXT: ld.param.b32 %rd21, [lower_twice_param_3];
+; CHECK-NEXT: and.b64 %rd22, %rd21, 7;
+; CHECK-NEXT: shl.b64 %rd23, %rd22, 3;
+; CHECK-NEXT: add.s64 %rd24, %rd17, %rd23;
+; CHECK-NEXT: add.s64 %rd25, %rd19, %rd23;
+; CHECK-NEXT: st.b64 [%SP+120], %rd11;
+; CHECK-NEXT: st.b64 [%SP+112], %rd10;
+; CHECK-NEXT: st.b64 [%SP+104], %rd9;
+; CHECK-NEXT: st.b64 [%SP+96], %rd8;
+; CHECK-NEXT: st.b64 [%SP+88], %rd7;
+; CHECK-NEXT: st.b64 [%SP+80], %rd6;
+; CHECK-NEXT: st.b64 [%SP+72], %rd5;
+; CHECK-NEXT: st.b64 [%SP+64], %rd4;
+; CHECK-NEXT: st.b64 [%rd20], %rd2;
+; CHECK-NEXT: st.b64 [%rd25], %rd3;
+; CHECK-NEXT: ld.param.b32 %rd26, [lower_twice_param_4];
+; CHECK-NEXT: and.b64 %rd27, %rd26, 7;
+; CHECK-NEXT: shl.b64 %rd28, %rd27, 3;
+; CHECK-NEXT: add.s64 %rd29, %rd19, %rd28;
+; CHECK-NEXT: st.b64 [%rd29], %rd12;
+; CHECK-NEXT: add.s64 %rd30, %rd17, %rd28;
+; CHECK-NEXT: ld.b64 %rd31, [%SP+72];
+; CHECK-NEXT: ld.b64 %rd32, [%SP+64];
+; CHECK-NEXT: ld.b64 %rd33, [%SP+88];
+; CHECK-NEXT: ld.b64 %rd34, [%SP+80];
+; CHECK-NEXT: ld.b64 %rd35, [%SP+104];
+; CHECK-NEXT: ld.b64 %rd36, [%SP+96];
+; CHECK-NEXT: ld.b64 %rd37, [%SP+120];
+; CHECK-NEXT: ld.b64 %rd38, [%SP+112];
+; CHECK-NEXT: st.b64 [%SP+56], %rd11;
+; CHECK-NEXT: st.b64 [%SP+48], %rd10;
+; CHECK-NEXT: st.b64 [%SP+40], %rd9;
+; CHECK-NEXT: st.b64 [%SP+32], %rd8;
+; CHECK-NEXT: st.b64 [%SP+24], %rd7;
+; CHECK-NEXT: st.b64 [%SP+16], %rd6;
+; CHECK-NEXT: st.b64 [%SP+8], %rd5;
+; CHECK-NEXT: st.b64 [%SP], %rd4;
+; CHECK-NEXT: st.b64 [%rd18], %rd2;
+; CHECK-NEXT: st.b64 [%rd24], %rd3;
+; CHECK-NEXT: st.b64 [%rd30], %rd12;
+; CHECK-NEXT: ld.param.b32 %rd39, [lower_twice_param_5];
+; CHECK-NEXT: and.b64 %rd40, %rd39, 7;
+; CHECK-NEXT: shl.b64 %rd41, %rd40, 3;
+; CHECK-NEXT: add.s64 %rd42, %rd17, %rd41;
+; CHECK-NEXT: st.b64 [%rd42], %rd13;
+; CHECK-NEXT: ld.b64 %rd43, [%SP+8];
+; CHECK-NEXT: ld.b64 %rd44, [%SP];
+; CHECK-NEXT: ld.b64 %rd45, [%SP+24];
+; CHECK-NEXT: ld.b64 %rd46, [%SP+16];
+; CHECK-NEXT: ld.b64 %rd47, [%SP+40];
+; CHECK-NEXT: ld.b64 %rd48, [%SP+32];
+; CHECK-NEXT: ld.b64 %rd49, [%SP+56];
+; CHECK-NEXT: ld.b64 %rd50, [%SP+48];
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd50, %rd49};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd48, %rd47};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd46, %rd45};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd44, %rd43};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1144], {%rd38, %rd37};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1128], {%rd36, %rd35};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1112], {%rd34, %rd33};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1096], {%rd32, %rd31};
+; CHECK-NEXT: ret;
+entry:
+; COM: Load four scalar elements from shared memory to use as insert values.
+ %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
+ %element.0 = load double, ptr addrspace(3) %offset.0, align 64
+ %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
+ %element.1 = load double, ptr addrspace(3) %offset.1, align 8
+ %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
+ %element.2 = load double, ptr addrspace(3) %offset.2, align 8
+ %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
+ %element.3 = load double, ptr addrspace(3) %offset.3, align 8
+
+; COM: begin chain 1
+ %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 %idx0
+ %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 %idx1
+
+; COM: interleave a second chain of insertelements. Chain 2 forks from
+; COM: %vector.build1, so the two chains share a common prefix but are
+; COM: distinct SSA values; each live chain needs its own stack slot
+; COM: (hence __local_depot1[128], two 64-byte slots at %SP and %SP+64).
+ %vector.build1-2 = insertelement <8 x double> %vector.build1, double %element.2, i32 %idx2
+
+; COM: continue chain 1 (same operands as chain 2's last insert, but a
+; COM: separate value)
+ %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 %idx2
+ %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx3
+
+; COM: save chain 1
+ %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
+ store <8 x double> %vector.build3, ptr addrspace(3) %location, align 64
+
+; COM: save chain 2
+ %location-2 = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1096
+ store <8 x double> %vector.build1-2, ptr addrspace(3) %location-2, align 64
+ ret void
+}
+
+; COM: a chain of insertelts may include dynamic and constant indices. We only
+; reduce the total number of memory operations if there is a high ratio of
+; dynamic to constant insertelts.
+
+; COM: lower all insertelts to stores. This avoids lowering the two dynamic
+; insertelts individually and saves memory.
+define ptx_kernel void @mix_lower_all(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0, i32 %idx2) local_unnamed_addr {
+; CHECK-LABEL: mix_lower_all(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot2[64];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b64 %rd<31>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: mov.b64 %SPL, __local_depot2;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b64 %rd1, [mix_lower_all_param_0];
+; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [mix_lower_all_param_1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [mix_lower_all_param_1+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [mix_lower_all_param_1+32];
+; CHECK-NEXT: ld.param.v2.b64 {%rd10, %rd11}, [mix_lower_all_param_1+48];
+; CHECK-NEXT: ld.shared.b64 %rd12, [%rd1+16];
+; CHECK-NEXT: ld.shared.b64 %rd13, [%rd1+24];
+; CHECK-NEXT: ld.param.b32 %rd14, [mix_lower_all_param_2];
+; CHECK-NEXT: and.b64 %rd15, %rd14, 7;
+; CHECK-NEXT: shl.b64 %rd16, %rd15, 3;
+; CHECK-NEXT: add.u64 %rd17, %SP, 0;
+; CHECK-NEXT: add.s64 %rd18, %rd17, %rd16;
+; CHECK-NEXT: st.b64 [%SP+56], %rd11;
+; CHECK-NEXT: st.b64 [%SP+48], %rd10;
+; CHECK-NEXT: st.b64 [%SP+40], %rd9;
+; CHECK-NEXT: st.b64 [%SP+32], %rd8;
+; CHECK-NEXT: st.b64 [%SP+24], %rd7;
+; CHECK-NEXT: st.b64 [%SP+16], %rd6;
+; CHECK-NEXT: st.b64 [%SP+8], %rd5;
+; CHECK-NEXT: st.b64 [%SP], %rd4;
+; CHECK-NEXT: st.b64 [%rd18], %rd2;
+; CHECK-NEXT: st.b64 [%SP+16], %rd12;
+; CHECK-NEXT: st.b64 [%SP+8], %rd3;
+; CHECK-NEXT: ld.param.b32 %rd19, [mix_lower_all_param_3];
+; CHECK-NEXT: and.b64 %rd20, %rd19, 7;
+; CHECK-NEXT: shl.b64 %rd21, %rd20, 3;
+; CHECK-NEXT: add.s64 %rd22, %rd17, %rd21;
+; CHECK-NEXT: st.b64 [%rd22], %rd13;
+; CHECK-NEXT: ld.b64 %rd23, [%SP+8];
+; CHECK-NEXT: ld.b64 %rd24, [%SP];
+; CHECK-NEXT: ld.b64 %rd25, [%SP+24];
+; CHECK-NEXT: ld.b64 %rd26, [%SP+16];
+; CHECK-NEXT: ld.b64 %rd27, [%SP+40];
+; CHECK-NEXT: ld.b64 %rd28, [%SP+32];
+; CHECK-NEXT: ld.b64 %rd29, [%SP+56];
+; CHECK-NEXT: ld.b64 %rd30, [%SP+48];
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd30, %rd29};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd28, %rd27};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd26, %rd25};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd24, %rd23};
+; CHECK-NEXT: ret;
+entry:
+; COM: Load four scalar elements from shared memory to use as insert values.
+ %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
+ %element.0 = load double, ptr addrspace(3) %offset.0, align 64
+ %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
+ %element.1 = load double, ptr addrspace(3) %offset.1, align 8
+ %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
+ %element.2 = load double, ptr addrspace(3) %offset.2, align 8
+ %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
+ %element.3 = load double, ptr addrspace(3) %offset.3, align 8
+; COM: Chain mixing dynamic (%idx0, %idx2) and constant (1, 2) indices. In
+; COM: the checked output above, the constant-index inserts are also lowered
+; COM: to fixed-offset stack stores ([%SP+8], [%SP+16]) so the whole chain
+; COM: shares a single 64-byte spill slot.
+ %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 %idx0
+ %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 1
+ %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 2
+ %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx2
+; COM: Store the rebuilt vector back to shared memory so the chain has a use.
+ %location = getelementptr i8, ptr addrspace(3) %shared.mem, i32 1024
+ store <8 x double> %vector.build3, ptr addrspace(3) %location, align 64
+ ret void
+}
+
+; COM: lower only the single dynamic insertelt. Lowering the constant
+; insertelts to stores would not reduce the total amount of loads and stores.
+define ptx_kernel void @mix_lower_some(ptr addrspace(3) %shared.mem, <8 x double> %vector, i32 %idx0) local_unnamed_addr {
+; CHECK-LABEL: mix_lower_some(
+; CHECK: {
+; CHECK-NEXT: .local .align 8 .b8 __local_depot3[64];
+; CHECK-NEXT: .reg .b64 %SP;
+; CHECK-NEXT: .reg .b64 %SPL;
+; CHECK-NEXT: .reg .b64 %rd<25>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0: // %entry
+; CHECK-NEXT: mov.b64 %SPL, __local_depot3;
+; CHECK-NEXT: cvta.local.u64 %SP, %SPL;
+; CHECK-NEXT: ld.param.b64 %rd1, [mix_lower_some_param_0];
+; CHECK-NEXT: ld.shared.v2.b64 {%rd2, %rd3}, [%rd1];
+; CHECK-NEXT: ld.param.v2.b64 {%rd4, %rd5}, [mix_lower_some_param_1+16];
+; CHECK-NEXT: ld.param.v2.b64 {%rd6, %rd7}, [mix_lower_some_param_1+32];
+; CHECK-NEXT: ld.param.v2.b64 {%rd8, %rd9}, [mix_lower_some_param_1+48];
+; CHECK-NEXT: ld.param.b32 %rd10, [mix_lower_some_param_2];
+; CHECK-NEXT: and.b64 %rd11, %rd10, 7;
+; CHECK-NEXT: shl.b64 %rd12, %rd11, 3;
+; CHECK-NEXT: add.u64 %rd13, %SP, 0;
+; CHECK-NEXT: add.s64 %rd14, %rd13, %rd12;
+; CHECK-NEXT: ld.shared.b64 %rd15, [%rd1+16];
+; CHECK-NEXT: ld.shared.b64 %rd16, [%rd1+24];
+; CHECK-NEXT: ld.shared.b64 %rd17, [%rd1+32];
+; CHECK-NEXT: ld.shared.b64 %rd18, [%rd1+40];
+; CHECK-NEXT: ld.shared.b64 %rd19, [%rd1+48];
+; CHECK-NEXT: ld.shared.b64 %rd20, [%rd1+56];
+; CHECK-NEXT: st.b64 [%SP+56], %rd9;
+; CHECK-NEXT: st.b64 [%SP+48], %rd8;
+; CHECK-NEXT: st.b64 [%SP+40], %rd7;
+; CHECK-NEXT: st.b64 [%SP+32], %rd6;
+; CHECK-NEXT: st.b64 [%SP+24], %rd5;
+; CHECK-NEXT: st.b64 [%SP+16], %rd15;
+; CHECK-NEXT: st.b64 [%SP+8], %rd3;
+; CHECK-NEXT: st.b64 [%SP], %rd2;
+; CHECK-NEXT: st.b64 [%rd14], %rd16;
+; CHECK-NEXT: ld.b64 %rd21, [%SP+8];
+; CHECK-NEXT: ld.b64 %rd22, [%SP];
+; CHECK-NEXT: ld.b64 %rd23, [%SP+24];
+; CHECK-NEXT: ld.b64 %rd24, [%SP+16];
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1072], {%rd19, %rd20};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1056], {%rd17, %rd18};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1040], {%rd24, %rd23};
+; CHECK-NEXT: st.shared.v2.b64 [%rd1+1024], {%rd22, %rd21};
+; CHECK-NEXT: ret;
+entry:
+ %offset.0 = getelementptr double, ptr addrspace(3) %shared.mem, i32 0
+ %element.0 = load double, ptr addrspace(3) %offset.0, align 64
+ %offset.1 = getelementptr double, ptr addrspace(3) %shared.mem, i32 1
+ %element.1 = load double, ptr addrspace(3) %offset.1, align 8
+ %offset.2 = getelementptr double, ptr addrspace(3) %shared.mem, i32 2
+ %element.2 = load double, ptr addrspace(3) %offset.2, align 8
+ %offset.3 = getelementptr double, ptr addrspace(3) %shared.mem, i32 3
+ %element.3 = load double, ptr addrspace(3) %offset.3, align 8
+ %offset.4 = getelementptr double, ptr addrspace(3) %shared.mem, i32 4
+ %element.4 = load double, ptr addrspace(3) %offset.4, align 8
+ %offset.5 = getelementptr double, ptr addrspace(3) %shared.mem, i32 5
+ %element.5 = load double, ptr addrspace(3) %offset.5, align 8
+ %offset.6 = getelementptr double, ptr addrspace(3) %shared.mem, i32 6
+ %element.6 = load double, ptr addrspace(3) %offset.6, align 8
+ %offset.7 = getelementptr double, ptr addrspace(3) %shared.mem, i32 7
+ %element.7 = load double, ptr addrspace(3) %offset.7, align 8
+ %vector.build0 = insertelement <8 x double> %vector, double %element.0, i32 0
+ %vector.build1 = insertelement <8 x double> %vector.build0, double %element.1, i32 1
+ %vector.build2 = insertelement <8 x double> %vector.build1, double %element.2, i32 2
+ %vector.build3 = insertelement <8 x double> %vector.build2, double %element.3, i32 %idx0
----------------
Artem-B wrote:
What's the general strategy for testing these changes? I see some complex cases tested, but the basic ones appear to be absent. While making sure that we can handle nontrivial use cases is great, having small, very specific test cases is also important -- when things break, the fewer moving parts we have in the failing test, the better.
For new functionality like this I would expect to see a somewhat more systematic approach covering individual corner cases before testing the combination of them. It makes debugging/troubleshooting easier.
E.g.:
- individual tests for dynamic insertion:
- at the beginning of the chain
- at the end of the chain
- in the middle of the chain
- repeated insertion with the same dynamic index
Then build more complicated tests with multiple dynamic indices, conditional insertion, etc.
https://github.com/llvm/llvm-project/pull/162368
More information about the llvm-commits
mailing list