[llvm] dd3aa5e - [NVPTX] Update 32-bit NVPTX tests to use 64-bit

Joseph Huber via llvm-commits llvm-commits at lists.llvm.org
Wed Jul 10 14:01:20 PDT 2024


Author: Joseph Huber
Date: 2024-07-10T16:01:12-05:00
New Revision: dd3aa5eb706eb18a9039e01f9434d432ac468340

URL: https://github.com/llvm/llvm-project/commit/dd3aa5eb706eb18a9039e01f9434d432ac468340
DIFF: https://github.com/llvm/llvm-project/commit/dd3aa5eb706eb18a9039e01f9434d432ac468340.diff

LOG: [NVPTX] Update 32-bit NVPTX tests to use 64-bit

Summary:
These tests used the now-removed 32-bit `nvptx` target. Because they
invoke `ptxas-verify`, they would fail now that the target has been
removed. Update the tests to use the 64-bit `nvptx64` target instead.

Added: 
    

Modified: 
    llvm/test/CodeGen/NVPTX/inline-asm-b128-test1.ll
    llvm/test/CodeGen/NVPTX/inline-asm-b128-test2.ll
    llvm/test/CodeGen/NVPTX/inline-asm-b128-test3.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/NVPTX/inline-asm-b128-test1.ll b/llvm/test/CodeGen/NVPTX/inline-asm-b128-test1.ll
index 3232f40a40a70..cfbdb73e8e957 100644
--- a/llvm/test/CodeGen/NVPTX/inline-asm-b128-test1.ll
+++ b/llvm/test/CodeGen/NVPTX/inline-asm-b128-test1.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --extra_scrub --version 5
-; RUN: llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx83 | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx83 | %ptxas-verify -arch=sm_70 %}
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx83 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx83 | %ptxas-verify -arch=sm_70 %}
 
 target triple = "nvptx64-nvidia-cuda"
 
@@ -9,22 +9,19 @@ target triple = "nvptx64-nvidia-cuda"
 define void @test_b128_input_from_const() {
 ; CHECK-LABEL: test_b128_input_from_const(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<3>;
-; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-NEXT:    .reg .b64 %rd<5>;
 ; CHECK-NEXT:    .reg .b128 %rq<2>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
 ; CHECK-NEXT:    mov.u64 %rd2, 0;
 ; CHECK-NEXT:    mov.u64 %rd3, 42;
 ; CHECK-NEXT:    mov.b128 %rq1, {%rd3, %rd2};
-; CHECK-NEXT:    mov.u32 %r1, value;
-; CHECK-NEXT:    cvta.global.u32 %r2, %r1;
-; CHECK-NEXT:    cvt.u64.u32 %rd1, %r2;
+; CHECK-NEXT:    mov.u64 %rd4, value;
+; CHECK-NEXT:    cvta.global.u64 %rd1, %rd4;
 ; CHECK-NEXT:    // begin inline asm
 ; CHECK-NEXT:    { st.b128 [%rd1], %rq1; }
 ; CHECK-NEXT:    // end inline asm
 ; CHECK-NEXT:    ret;
-
   tail call void asm sideeffect "{ st.b128 [$0], $1; }", "l,q"(ptr nonnull addrspacecast (ptr addrspace(1) @value to ptr), i128 42)
   ret void
 }
@@ -32,24 +29,21 @@ define void @test_b128_input_from_const() {
 define void @test_b128_input_from_load(ptr nocapture readonly %data) {
 ; CHECK-LABEL: test_b128_input_from_load(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<5>;
-; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
 ; CHECK-NEXT:    .reg .b128 %rq<2>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.u32 %r1, [test_b128_input_from_load_param_0];
-; CHECK-NEXT:    cvta.to.global.u32 %r2, %r1;
-; CHECK-NEXT:    ld.global.u64 %rd2, [%r2+8];
-; CHECK-NEXT:    ld.global.u64 %rd3, [%r2];
-; CHECK-NEXT:    mov.b128 %rq1, {%rd3, %rd2};
-; CHECK-NEXT:    mov.u32 %r3, value;
-; CHECK-NEXT:    cvta.global.u32 %r4, %r3;
-; CHECK-NEXT:    cvt.u64.u32 %rd1, %r4;
+; CHECK-NEXT:    ld.param.u64 %rd2, [test_b128_input_from_load_param_0];
+; CHECK-NEXT:    cvta.to.global.u64 %rd3, %rd2;
+; CHECK-NEXT:    ld.global.u64 %rd4, [%rd3+8];
+; CHECK-NEXT:    ld.global.u64 %rd5, [%rd3];
+; CHECK-NEXT:    mov.b128 %rq1, {%rd5, %rd4};
+; CHECK-NEXT:    mov.u64 %rd6, value;
+; CHECK-NEXT:    cvta.global.u64 %rd1, %rd6;
 ; CHECK-NEXT:    // begin inline asm
 ; CHECK-NEXT:    { st.b128 [%rd1], %rq1; }
 ; CHECK-NEXT:    // end inline asm
 ; CHECK-NEXT:    ret;
-
   %1 = addrspacecast ptr %data to ptr addrspace(1)
   %2 = load <2 x i64>, ptr addrspace(1) %1, align 16
   %3 = bitcast <2 x i64> %2 to i128
@@ -62,26 +56,23 @@ define void @test_b128_input_from_select(ptr nocapture readonly %flag) {
 ; CHECK:       {
 ; CHECK-NEXT:    .reg .pred %p<2>;
 ; CHECK-NEXT:    .reg .b16 %rs<2>;
-; CHECK-NEXT:    .reg .b32 %r<5>;
-; CHECK-NEXT:    .reg .b64 %rd<4>;
+; CHECK-NEXT:    .reg .b64 %rd<7>;
 ; CHECK-NEXT:    .reg .b128 %rq<2>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.u32 %r1, [test_b128_input_from_select_param_0];
-; CHECK-NEXT:    cvta.to.global.u32 %r2, %r1;
-; CHECK-NEXT:    ld.global.u8 %rs1, [%r2];
+; CHECK-NEXT:    ld.param.u64 %rd2, [test_b128_input_from_select_param_0];
+; CHECK-NEXT:    cvta.to.global.u64 %rd3, %rd2;
+; CHECK-NEXT:    ld.global.u8 %rs1, [%rd3];
 ; CHECK-NEXT:    setp.eq.s16 %p1, %rs1, 0;
-; CHECK-NEXT:    selp.b64 %rd2, 24, 42, %p1;
-; CHECK-NEXT:    mov.u64 %rd3, 0;
-; CHECK-NEXT:    mov.b128 %rq1, {%rd2, %rd3};
-; CHECK-NEXT:    mov.u32 %r3, value;
-; CHECK-NEXT:    cvta.global.u32 %r4, %r3;
-; CHECK-NEXT:    cvt.u64.u32 %rd1, %r4;
+; CHECK-NEXT:    selp.b64 %rd4, 24, 42, %p1;
+; CHECK-NEXT:    mov.u64 %rd5, 0;
+; CHECK-NEXT:    mov.b128 %rq1, {%rd4, %rd5};
+; CHECK-NEXT:    mov.u64 %rd6, value;
+; CHECK-NEXT:    cvta.global.u64 %rd1, %rd6;
 ; CHECK-NEXT:    // begin inline asm
 ; CHECK-NEXT:    { st.b128 [%rd1], %rq1; }
 ; CHECK-NEXT:    // end inline asm
 ; CHECK-NEXT:    ret;
-
   %1 = addrspacecast ptr %flag to ptr addrspace(1)
   %2 = load i8, ptr addrspace(1) %1, align 1
   %3 = icmp eq i8 %2, 0
@@ -106,7 +97,6 @@ define void @test_store_b128_output() {
 ; CHECK-NEXT:    st.global.u64 [value+8], %rd4;
 ; CHECK-NEXT:    st.global.u64 [value], %rd3;
 ; CHECK-NEXT:    ret;
-
   %1 = tail call i128 asm "{ mov.b128 $0, 41; }", "=q"()
   %add = add nsw i128 %1, 1
   %2 = bitcast i128 %add to <2 x i64>
@@ -117,26 +107,24 @@ define void @test_store_b128_output() {
 define void @test_use_of_b128_output(ptr nocapture readonly %data) {
 ; CHECK-LABEL: test_use_of_b128_output(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<3>;
-; CHECK-NEXT:    .reg .b64 %rd<7>;
+; CHECK-NEXT:    .reg .b64 %rd<9>;
 ; CHECK-NEXT:    .reg .b128 %rq<3>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.param.u32 %r1, [test_use_of_b128_output_param_0];
-; CHECK-NEXT:    cvta.to.global.u32 %r2, %r1;
-; CHECK-NEXT:    ld.global.u64 %rd1, [%r2+8];
-; CHECK-NEXT:    ld.global.u64 %rd2, [%r2];
-; CHECK-NEXT:    mov.b128 %rq2, {%rd2, %rd1};
+; CHECK-NEXT:    ld.param.u64 %rd1, [test_use_of_b128_output_param_0];
+; CHECK-NEXT:    cvta.to.global.u64 %rd2, %rd1;
+; CHECK-NEXT:    ld.global.u64 %rd3, [%rd2+8];
+; CHECK-NEXT:    ld.global.u64 %rd4, [%rd2];
+; CHECK-NEXT:    mov.b128 %rq2, {%rd4, %rd3};
 ; CHECK-NEXT:    // begin inline asm
 ; CHECK-NEXT:    { mov.b128 %rq1, %rq2; }
 ; CHECK-NEXT:    // end inline asm
-; CHECK-NEXT:    mov.b128 {%rd3, %rd4}, %rq1;
-; CHECK-NEXT:    add.cc.s64 %rd5, %rd3, 1;
-; CHECK-NEXT:    addc.cc.s64 %rd6, %rd4, 0;
-; CHECK-NEXT:    st.global.u64 [value], %rd5;
-; CHECK-NEXT:    st.global.u64 [value+8], %rd6;
+; CHECK-NEXT:    mov.b128 {%rd5, %rd6}, %rq1;
+; CHECK-NEXT:    add.cc.s64 %rd7, %rd5, 1;
+; CHECK-NEXT:    addc.cc.s64 %rd8, %rd6, 0;
+; CHECK-NEXT:    st.global.u64 [value], %rd7;
+; CHECK-NEXT:    st.global.u64 [value+8], %rd8;
 ; CHECK-NEXT:    ret;
-
   %1 = addrspacecast ptr %data to ptr addrspace(1)
   %2 = load <2 x i64>, ptr addrspace(1) %1, align 16
   %3 = bitcast <2 x i64> %2 to i128

diff --git a/llvm/test/CodeGen/NVPTX/inline-asm-b128-test2.ll b/llvm/test/CodeGen/NVPTX/inline-asm-b128-test2.ll
index 3d1d7fbbe27e8..6d8160a6714ef 100644
--- a/llvm/test/CodeGen/NVPTX/inline-asm-b128-test2.ll
+++ b/llvm/test/CodeGen/NVPTX/inline-asm-b128-test2.ll
@@ -1,6 +1,6 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --extra_scrub --version 5
-; RUN: llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx83 | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx83 | %ptxas-verify -arch=sm_70 %}
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx83 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx83 | %ptxas-verify -arch=sm_70 %}
 
 target triple = "nvptx64-nvidia-cuda"
 
@@ -17,20 +17,16 @@ target triple = "nvptx64-nvidia-cuda"
 define void @test_corner_values() {
 ; CHECK-LABEL: test_corner_values(
 ; CHECK:       {
-; CHECK-NEXT:    .reg .b32 %r<20>;
-; CHECK-NEXT:    .reg .b64 %rd<17>;
+; CHECK-NEXT:    .reg .b64 %rd<24>;
 ; CHECK-NEXT:    .reg .b128 %rq<5>;
 ; CHECK-EMPTY:
 ; CHECK-NEXT:  // %bb.0:
-; CHECK-NEXT:    ld.global.u32 %r1, [v64];
-; CHECK-NEXT:    add.s32 %r2, %r1, 8;
+; CHECK-NEXT:    ld.global.u64 %rd1, [v64];
+; CHECK-NEXT:    add.s64 %rd2, %rd1, 8;
 ; CHECK-NEXT:    mov.u64 %rd13, -1;
 ; CHECK-NEXT:    mov.b128 %rq1, {%rd13, %rd13};
-; CHECK-NEXT:    cvt.u64.u32 %rd1, %r1;
-; CHECK-NEXT:    cvt.u64.u32 %rd2, %r2;
-; CHECK-NEXT:    mov.u32 %r3, v_u128_max;
-; CHECK-NEXT:    cvta.global.u32 %r4, %r3;
-; CHECK-NEXT:    cvt.u64.u32 %rd3, %r4;
+; CHECK-NEXT:    mov.u64 %rd14, v_u128_max;
+; CHECK-NEXT:    cvta.global.u64 %rd3, %rd14;
 ; CHECK-NEXT:    // begin inline asm
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:    .reg .b64 hi;
@@ -41,16 +37,13 @@ define void @test_corner_values() {
 ; CHECK-NEXT:    st.b128 [%rd3], %rq1;
 ; CHECK-NEXT:    }
 ; CHECK-NEXT:    // end inline asm
-; CHECK-NEXT:    ld.global.u32 %r5, [v64];
-; CHECK-NEXT:    add.s32 %r6, %r5, 16;
-; CHECK-NEXT:    add.s32 %r7, %r5, 24;
-; CHECK-NEXT:    mov.u64 %rd14, 9223372036854775807;
-; CHECK-NEXT:    mov.b128 %rq2, {%rd13, %rd14};
-; CHECK-NEXT:    mov.u32 %r8, v_i128_max;
-; CHECK-NEXT:    cvta.global.u32 %r9, %r8;
-; CHECK-NEXT:    cvt.u64.u32 %rd6, %r9;
-; CHECK-NEXT:    cvt.u64.u32 %rd4, %r6;
-; CHECK-NEXT:    cvt.u64.u32 %rd5, %r7;
+; CHECK-NEXT:    ld.global.u64 %rd15, [v64];
+; CHECK-NEXT:    add.s64 %rd4, %rd15, 16;
+; CHECK-NEXT:    add.s64 %rd5, %rd15, 24;
+; CHECK-NEXT:    mov.u64 %rd16, 9223372036854775807;
+; CHECK-NEXT:    mov.b128 %rq2, {%rd13, %rd16};
+; CHECK-NEXT:    mov.u64 %rd17, v_i128_max;
+; CHECK-NEXT:    cvta.global.u64 %rd6, %rd17;
 ; CHECK-NEXT:    // begin inline asm
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:    .reg .b64 hi;
@@ -61,17 +54,14 @@ define void @test_corner_values() {
 ; CHECK-NEXT:    st.b128 [%rd6], %rq2;
 ; CHECK-NEXT:    }
 ; CHECK-NEXT:    // end inline asm
-; CHECK-NEXT:    ld.global.u32 %r10, [v64];
-; CHECK-NEXT:    add.s32 %r11, %r10, 32;
-; CHECK-NEXT:    add.s32 %r12, %r10, 40;
-; CHECK-NEXT:    mov.u64 %rd15, -9223372036854775808;
-; CHECK-NEXT:    mov.u64 %rd16, 0;
-; CHECK-NEXT:    mov.b128 %rq3, {%rd16, %rd15};
-; CHECK-NEXT:    mov.u32 %r13, v_i128_min;
-; CHECK-NEXT:    cvta.global.u32 %r14, %r13;
-; CHECK-NEXT:    cvt.u64.u32 %rd9, %r14;
-; CHECK-NEXT:    cvt.u64.u32 %rd7, %r11;
-; CHECK-NEXT:    cvt.u64.u32 %rd8, %r12;
+; CHECK-NEXT:    ld.global.u64 %rd18, [v64];
+; CHECK-NEXT:    add.s64 %rd7, %rd18, 32;
+; CHECK-NEXT:    add.s64 %rd8, %rd18, 40;
+; CHECK-NEXT:    mov.u64 %rd19, -9223372036854775808;
+; CHECK-NEXT:    mov.u64 %rd20, 0;
+; CHECK-NEXT:    mov.b128 %rq3, {%rd20, %rd19};
+; CHECK-NEXT:    mov.u64 %rd21, v_i128_min;
+; CHECK-NEXT:    cvta.global.u64 %rd9, %rd21;
 ; CHECK-NEXT:    // begin inline asm
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:    .reg .b64 hi;
@@ -82,15 +72,12 @@ define void @test_corner_values() {
 ; CHECK-NEXT:    st.b128 [%rd9], %rq3;
 ; CHECK-NEXT:    }
 ; CHECK-NEXT:    // end inline asm
-; CHECK-NEXT:    ld.global.u32 %r15, [v64];
-; CHECK-NEXT:    add.s32 %r16, %r15, 48;
-; CHECK-NEXT:    add.s32 %r17, %r15, 56;
-; CHECK-NEXT:    mov.b128 %rq4, {%rd16, %rd16};
-; CHECK-NEXT:    mov.u32 %r18, v_u128_zero;
-; CHECK-NEXT:    cvta.global.u32 %r19, %r18;
-; CHECK-NEXT:    cvt.u64.u32 %rd12, %r19;
-; CHECK-NEXT:    cvt.u64.u32 %rd10, %r16;
-; CHECK-NEXT:    cvt.u64.u32 %rd11, %r17;
+; CHECK-NEXT:    ld.global.u64 %rd22, [v64];
+; CHECK-NEXT:    add.s64 %rd10, %rd22, 48;
+; CHECK-NEXT:    add.s64 %rd11, %rd22, 56;
+; CHECK-NEXT:    mov.b128 %rq4, {%rd20, %rd20};
+; CHECK-NEXT:    mov.u64 %rd23, v_u128_zero;
+; CHECK-NEXT:    cvta.global.u64 %rd12, %rd23;
 ; CHECK-NEXT:    // begin inline asm
 ; CHECK-NEXT:    {
 ; CHECK-NEXT:    .reg .b64 hi;

diff --git a/llvm/test/CodeGen/NVPTX/inline-asm-b128-test3.ll b/llvm/test/CodeGen/NVPTX/inline-asm-b128-test3.ll
index ae453977123e0..5bd54281edff5 100644
--- a/llvm/test/CodeGen/NVPTX/inline-asm-b128-test3.ll
+++ b/llvm/test/CodeGen/NVPTX/inline-asm-b128-test3.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --extra_scrub --version 5
-; RUN: llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx83 | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -march=nvptx -mcpu=sm_70 -mattr=+ptx83 | %ptxas-verify -arch=sm_70 %}
+; RUN: llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx83 | FileCheck %s
+; RUN: %if ptxas %{ llc < %s -march=nvptx64 -mcpu=sm_70 -mattr=+ptx83 | %ptxas-verify -arch=sm_70 %}
 
 target triple = "nvptx64-nvidia-cuda"
 


        


More information about the llvm-commits mailing list