[llvm] CodeGen/test: improve a test, regen with UTC (PR #113338)
Ramkumar Ramachandra via llvm-commits
llvm-commits at lists.llvm.org
Mon Nov 4 03:29:20 PST 2024
================
@@ -20,1595 +21,4947 @@
; TODO: optimize .sys.shared into .cta.shared or .cluster.shared .
-; generic statespace
+;; generic statespace
+
+; generic
+
+define void @generic_i8(ptr %a) {
+; CHECK-LABEL: generic_i8(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [generic_i8_param_0];
+; CHECK-NEXT: ld.u8 %rs1, [%rd1];
+; CHECK-NEXT: add.s16 %rs2, %rs1, 1;
+; CHECK-NEXT: st.u8 [%rd1], %rs2;
+; CHECK-NEXT: ret;
+ %a.load = load i8, ptr %a
+ %a.add = add i8 %a.load, 1
+ store i8 %a.add, ptr %a
+ ret void
+}
+
+define void @generic_i16(ptr %a) {
+; CHECK-LABEL: generic_i16(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [generic_i16_param_0];
+; CHECK-NEXT: ld.u16 %rs1, [%rd1];
+; CHECK-NEXT: add.s16 %rs2, %rs1, 1;
+; CHECK-NEXT: st.u16 [%rd1], %rs2;
+; CHECK-NEXT: ret;
+ %a.load = load i16, ptr %a
+ %a.add = add i16 %a.load, 1
+ store i16 %a.add, ptr %a
+ ret void
+}
+
+define void @generic_i32(ptr %a) {
+; CHECK-LABEL: generic_i32(
+; CHECK: {
+; CHECK-NEXT: .reg .b32 %r<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [generic_i32_param_0];
+; CHECK-NEXT: ld.u32 %r1, [%rd1];
+; CHECK-NEXT: add.s32 %r2, %r1, 1;
+; CHECK-NEXT: st.u32 [%rd1], %r2;
+; CHECK-NEXT: ret;
+ %a.load = load i32, ptr %a
+ %a.add = add i32 %a.load, 1
+ store i32 %a.add, ptr %a
+ ret void
+}
+
+define void @generic_i64(ptr %a) {
+; CHECK-LABEL: generic_i64(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<4>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [generic_i64_param_0];
+; CHECK-NEXT: ld.u64 %rd2, [%rd1];
+; CHECK-NEXT: add.s64 %rd3, %rd2, 1;
+; CHECK-NEXT: st.u64 [%rd1], %rd3;
+; CHECK-NEXT: ret;
+ %a.load = load i64, ptr %a
+ %a.add = add i64 %a.load, 1
+ store i64 %a.add, ptr %a
+ ret void
+}
+
+define void @generic_float(ptr %a) {
+; CHECK-LABEL: generic_float(
+; CHECK: {
+; CHECK-NEXT: .reg .f32 %f<3>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [generic_float_param_0];
+; CHECK-NEXT: ld.f32 %f1, [%rd1];
+; CHECK-NEXT: add.rn.f32 %f2, %f1, 0f3F800000;
+; CHECK-NEXT: st.f32 [%rd1], %f2;
+; CHECK-NEXT: ret;
+ %a.load = load float, ptr %a
+ %a.add = fadd float %a.load, 1.
+ store float %a.add, ptr %a
+ ret void
+}
+
+define void @generic_double(ptr %a) {
+; CHECK-LABEL: generic_double(
+; CHECK: {
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-NEXT: .reg .f64 %fd<3>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [generic_double_param_0];
+; CHECK-NEXT: ld.f64 %fd1, [%rd1];
+; CHECK-NEXT: add.rn.f64 %fd2, %fd1, 0d3FF0000000000000;
+; CHECK-NEXT: st.f64 [%rd1], %fd2;
+; CHECK-NEXT: ret;
+ %a.load = load double, ptr %a
+ %a.add = fadd double %a.load, 1.
+ store double %a.add, ptr %a
+ ret void
+}
+
+; TODO: make the lowering of these weak vector ops consistent with
+; the ones of the next test. This test lowers to a weak PTX
+; vector op, but the next test lowers to a scalar PTX op.
+define void @generic_2xi8(ptr %a) {
+; CHECK-LABEL: generic_2xi8(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<5>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [generic_2xi8_param_0];
+; CHECK-NEXT: ld.v2.u8 {%rs1, %rs2}, [%rd1];
+; CHECK-NEXT: add.s16 %rs3, %rs2, 1;
+; CHECK-NEXT: add.s16 %rs4, %rs1, 1;
+; CHECK-NEXT: st.v2.u8 [%rd1], {%rs4, %rs3};
+; CHECK-NEXT: ret;
+ %a.load = load <2 x i8>, ptr %a
+ %a.add = add <2 x i8> %a.load, <i8 1, i8 1>
+ store <2 x i8> %a.add, ptr %a
+ ret void
+}
+
+; TODO: make the lowering of these weak vector ops consistent with
+; the ones of the previous test. This test lowers to a weak
+; PTX scalar op, but the prior test lowers to a vector PTX op.
+define void @generic_4xi8(ptr %a) {
+; CHECK-LABEL: generic_4xi8(
+; CHECK: {
+; CHECK-NEXT: .reg .b16 %rs<9>;
+; CHECK-NEXT: .reg .b32 %r<13>;
+; CHECK-NEXT: .reg .b64 %rd<2>;
+; CHECK-EMPTY:
+; CHECK-NEXT: // %bb.0:
+; CHECK-NEXT: ld.param.u64 %rd1, [generic_4xi8_param_0];
+; CHECK-NEXT: ld.u32 %r1, [%rd1];
+; CHECK-NEXT: bfe.u32 %r2, %r1, 0, 8;
+; CHECK-NEXT: cvt.u16.u32 %rs1, %r2;
+; CHECK-NEXT: add.s16 %rs2, %rs1, 1;
+; CHECK-NEXT: cvt.u32.u16 %r3, %rs2;
+; CHECK-NEXT: bfe.u32 %r4, %r1, 8, 8;
+; CHECK-NEXT: cvt.u16.u32 %rs3, %r4;
+; CHECK-NEXT: add.s16 %rs4, %rs3, 1;
+; CHECK-NEXT: cvt.u32.u16 %r5, %rs4;
+; CHECK-NEXT: bfi.b32 %r6, %r5, %r3, 8, 8;
+; CHECK-NEXT: bfe.u32 %r7, %r1, 16, 8;
+; CHECK-NEXT: cvt.u16.u32 %rs5, %r7;
+; CHECK-NEXT: add.s16 %rs6, %rs5, 1;
+; CHECK-NEXT: cvt.u32.u16 %r8, %rs6;
+; CHECK-NEXT: bfi.b32 %r9, %r8, %r6, 16, 8;
+; CHECK-NEXT: bfe.u32 %r10, %r1, 24, 8;
+; CHECK-NEXT: cvt.u16.u32 %rs7, %r10;
+; CHECK-NEXT: add.s16 %rs8, %rs7, 1;
+; CHECK-NEXT: cvt.u32.u16 %r11, %rs8;
+; CHECK-NEXT: bfi.b32 %r12, %r11, %r9, 24, 8;
+; CHECK-NEXT: st.u32 [%rd1], %r12;
+; CHECK-NEXT: ret;
+ %a.load = load <4 x i8>, ptr %a
+ %a.add = add <4 x i8> %a.load, <i8 1, i8 1, i8 1, i8 1>
----------------
artagnon wrote:
Yes, otherwise the test becomes degenerate. Besides, I think every test should have free-standing meaning, independent of the current optimization pipeline.
https://github.com/llvm/llvm-project/pull/113338
More information about the llvm-commits
mailing list