[llvm] [NVPTX] support packed f32 instructions for sm_100+ (PR #126337)
Artem Belevich via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 13 13:58:04 PST 2025
================
@@ -0,0 +1,2665 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; ## Full FP32x2 support enabled by default.
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
+; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | FileCheck --check-prefixes=CHECK-O0 %s
+; RUN: %if ptxas %{ \
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
+; RUN: -O0 -disable-post-ra -frame-pointer=all -verify-machineinstrs \
+; RUN: | %ptxas-verify -arch=sm_100 \
+; RUN: %}
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
+; RUN: -O3 -verify-machineinstrs \
+; RUN: | FileCheck --check-prefixes=CHECK-O3 %s
+; RUN: %if ptxas %{ \
+; RUN: llc < %s -mtriple=nvptx64-nvidia-cuda -mcpu=sm_100 \
+; RUN: -O3 -verify-machineinstrs \
+; RUN: | %ptxas-verify -arch=sm_100 \
+; RUN: %}
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+target triple = "nvptx64-nvidia-cuda"
+
+define <2 x float> @test_ret_const() #0 {
+;
+; CHECK-O0-LABEL: test_ret_const(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O0-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O0-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_ret_const(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: mov.f32 %f1, 0f40000000;
+; CHECK-O3-NEXT: mov.f32 %f2, 0f3F800000;
+; CHECK-O3-NEXT: st.param.v2.f32 [func_retval0], {%f2, %f1};
+; CHECK-O3-NEXT: ret;
+ ret <2 x float> <float 1.0, float 2.0>
+}
+
+define float @test_extract_0(<2 x float> %a) #0 {
+;
+; CHECK-O0-LABEL: test_extract_0(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_extract_0_param_0];
+; CHECK-O0-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_extract_0(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_extract_0_param_0];
+; CHECK-O3-NEXT: st.param.f32 [func_retval0], %f1;
+; CHECK-O3-NEXT: ret;
+ %e = extractelement <2 x float> %a, i32 0
+ ret float %e
+}
+
+define float @test_extract_1(<2 x float> %a) #0 {
+;
+; CHECK-O0-LABEL: test_extract_1(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .f32 %f<3>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_extract_1_param_0];
+; CHECK-O0-NEXT: st.param.f32 [func_retval0], %f2;
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_extract_1(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<3>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_extract_1_param_0];
+; CHECK-O3-NEXT: st.param.f32 [func_retval0], %f2;
+; CHECK-O3-NEXT: ret;
+ %e = extractelement <2 x float> %a, i32 1
+ ret float %e
+}
+
+; NOTE: disabled as -O3 miscompiles this into pointer arithmetic on
+; test_extract_i_param_0 where the symbol's address is not taken first (that
+; is, moved to a temporary)
+; define float @test_extract_i(<2 x float> %a, i64 %idx) #0 {
+; ; CHECK-LABEL: test_extract_i(
+; ; CHECK: {
+; ; CHECK-NEXT: .reg .pred %p<2>;
+; ; CHECK-NEXT: .reg .f32 %f<4>;
+; ; CHECK-NEXT: .reg .b64 %rd<2>;
+; ; CHECK-EMPTY:
+; ; CHECK-NEXT: // %bb.0:
+; ; CHECK-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_extract_i_param_0];
+; ; CHECK-NEXT: ld.param.u64 %rd1, [test_extract_i_param_1];
+; ; CHECK-NEXT: setp.eq.s64 %p1, %rd1, 0;
+; ; CHECK-NEXT: selp.f32 %f3, %f1, %f2, %p1;
+; ; CHECK-NEXT: st.param.f32 [func_retval0], %f3;
+; ; CHECK-NEXT: ret;
+; %e = extractelement <2 x float> %a, i64 %idx
+; ret float %e
+; }
+
+define <2 x float> @test_fadd(<2 x float> %a, <2 x float> %b) #0 {
+;
+; CHECK-O0-LABEL: test_fadd(
+; CHECK-O0: {
+; CHECK-O0-NEXT: .reg .b32 %r<5>;
+; CHECK-O0-NEXT: .reg .f32 %f<7>;
+; CHECK-O0-NEXT: .reg .b64 %rd<10>;
+; CHECK-O0-EMPTY:
+; CHECK-O0-NEXT: // %bb.0:
+; CHECK-O0-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_fadd_param_0];
+; CHECK-O0-NEXT: mov.b32 %r1, %f1;
+; CHECK-O0-NEXT: cvt.u64.u32 %rd2, %r1;
+; CHECK-O0-NEXT: mov.b32 %r2, %f2;
+; CHECK-O0-NEXT: cvt.u64.u32 %rd3, %r2;
+; CHECK-O0-NEXT: shl.b64 %rd4, %rd3, 32;
+; CHECK-O0-NEXT: or.b64 %rd5, %rd2, %rd4;
+; CHECK-O0-NEXT: ld.param.v2.f32 {%f3, %f4}, [test_fadd_param_1];
+; CHECK-O0-NEXT: mov.b32 %r3, %f3;
+; CHECK-O0-NEXT: cvt.u64.u32 %rd6, %r3;
+; CHECK-O0-NEXT: mov.b32 %r4, %f4;
+; CHECK-O0-NEXT: cvt.u64.u32 %rd7, %r4;
+; CHECK-O0-NEXT: shl.b64 %rd8, %rd7, 32;
+; CHECK-O0-NEXT: or.b64 %rd9, %rd6, %rd8;
+; CHECK-O0-NEXT: add.rn.f32x2 %rd1, %rd5, %rd9;
+; CHECK-O0-NEXT: mov.b64 {%f5, %f6}, %rd1;
+; CHECK-O0-NEXT: st.param.v2.f32 [func_retval0], {%f5, %f6};
+; CHECK-O0-NEXT: ret;
+;
+; CHECK-O3-LABEL: test_fadd(
+; CHECK-O3: {
+; CHECK-O3-NEXT: .reg .f32 %f<5>;
+; CHECK-O3-NEXT: .reg .b64 %rd<5>;
+; CHECK-O3-EMPTY:
+; CHECK-O3-NEXT: // %bb.0:
+; CHECK-O3-NEXT: ld.param.v2.f32 {%f1, %f2}, [test_fadd_param_0];
+; CHECK-O3-NEXT: mov.b64 %rd2, {%f1, %f2};
+; CHECK-O3-NEXT: ld.param.v2.f32 {%f3, %f4}, [test_fadd_param_1];
+; CHECK-O3-NEXT: mov.b64 %rd3, {%f3, %f4};
+; CHECK-O3-NEXT: add.rn.f32x2 %rd4, %rd2, %rd3;
+; CHECK-O3-NEXT: st.param.b64 [func_retval0], %rd4;
+; CHECK-O3-NEXT: ret;
+ %r = fadd <2 x float> %a, %b
----------------
Artem-B wrote:
> shouldn't we want to check that optimizations may not introduce new instructions
There are two things -- optimization results must be valid. And that must be tested.
Back-end must handle valid inputs.
Those are somewhat independent things.
The crash above is a bug (please file an issue for it, if one does not exist yet) and needs to be addressed.
Whatever fixes it should also include a test that the code produces correct results regardless of optimization level.
This patch should concentrate on generating valid v2f32 operations where they are supported, splitting them into scalar ops on older GPUs, and all of that should work about the same regardless of optimizations.
https://github.com/llvm/llvm-project/pull/126337
More information about the llvm-commits
mailing list