[llvm] r248600 - AMDGPU: Add some more tests for literal operands

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 25 11:21:47 PDT 2015


Author: arsenm
Date: Fri Sep 25 13:21:47 2015
New Revision: 248600

URL: http://llvm.org/viewvc/llvm-project?rev=248600&view=rev
Log:
AMDGPU: Add some more tests for literal operands

Modified:
    llvm/trunk/test/CodeGen/AMDGPU/and.ll
    llvm/trunk/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll

Modified: llvm/trunk/test/CodeGen/AMDGPU/and.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/and.ll?rev=248600&r1=248599&r2=248600&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/and.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/and.ll Fri Sep 25 13:21:47 2015
@@ -2,6 +2,8 @@
 ; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s
 
+declare i32 @llvm.r600.read.tidig.x() #0
+
 ; FUNC-LABEL: {{^}}test2:
 ; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
 ; EG: AND_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
@@ -54,13 +56,80 @@ define void @s_and_constant_i32(i32 addr
   ret void
 }
 
-; FUNC-LABEL: {{^}}v_and_i32:
-; SI: v_and_b32
-define void @v_and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
-  %a = load i32, i32 addrspace(1)* %aptr, align 4
-  %b = load i32, i32 addrspace(1)* %bptr, align 4
+; FIXME: We should really duplicate the constant so that the SALU use
+; can fold into the s_and_b32 and the VALU one is materialized
+; directly without copying from the SGPR.
+
+; Second use is a VGPR use of the constant.
+; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_0:
+; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
+; SI-DAG: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
+; SI-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], [[K]]
+; SI: buffer_store_dword [[VK]]
+define void @s_and_multi_use_constant_i32_0(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+  %and = and i32 %a, 1234567
+
+  ; Just to stop future replacement of copy to vgpr + store with VALU op.
+  %foo = add i32 %and, %b
+  store volatile i32 %foo, i32 addrspace(1)* %out
+  store volatile i32 1234567, i32 addrspace(1)* %out
+  ret void
+}
+
+; Second use is another SGPR use of the constant.
+; FUNC-LABEL: {{^}}s_and_multi_use_constant_i32_1:
+; SI: s_mov_b32 [[K:s[0-9]+]], 0x12d687
+; SI: s_and_b32 [[AND:s[0-9]+]], s{{[0-9]+}}, [[K]]
+; SI: s_add_i32
+; SI: s_add_i32 [[ADD:s[0-9]+]], s{{[0-9]+}}, [[K]]
+; SI: buffer_store_dword v{{[0-9]+}}
+define void @s_and_multi_use_constant_i32_1(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+  %and = and i32 %a, 1234567
+  %foo = add i32 %and, 1234567
+  %bar = add i32 %foo, %b
+  store volatile i32 %bar, i32 addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}v_and_i32_vgpr_vgpr:
+; SI: v_and_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
+define void @v_and_i32_vgpr_vgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 addrspace(1)* %bptr) {
+  %tid = call i32 @llvm.r600.read.tidig.x() #0
+  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  %a = load i32, i32 addrspace(1)* %gep.a
+  %b = load i32, i32 addrspace(1)* %gep.b
   %and = and i32 %a, %b
-  store i32 %and, i32 addrspace(1)* %out, align 4
+  store i32 %and, i32 addrspace(1)* %gep.out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}v_and_i32_sgpr_vgpr:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]]
+; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
+; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
+define void @v_and_i32_sgpr_vgpr(i32 addrspace(1)* %out, i32 %a, i32 addrspace(1)* %bptr) {
+  %tid = call i32 @llvm.r600.read.tidig.x() #0
+  %gep.b = getelementptr i32, i32 addrspace(1)* %bptr, i32 %tid
+  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  %b = load i32, i32 addrspace(1)* %gep.b
+  %and = and i32 %a, %b
+  store i32 %and, i32 addrspace(1)* %gep.out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}v_and_i32_vgpr_sgpr:
+; SI-DAG: s_load_dword [[SA:s[0-9]+]]
+; SI-DAG: {{buffer|flat}}_load_dword [[VB:v[0-9]+]]
+; SI: v_and_b32_e32 v{{[0-9]+}}, [[SA]], [[VB]]
+define void @v_and_i32_vgpr_sgpr(i32 addrspace(1)* %out, i32 addrspace(1)* %aptr, i32 %b) {
+  %tid = call i32 @llvm.r600.read.tidig.x() #0
+  %gep.a = getelementptr i32, i32 addrspace(1)* %aptr, i32 %tid
+  %gep.out = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
+  %a = load i32, i32 addrspace(1)* %gep.a
+  %and = and i32 %a, %b
+  store i32 %and, i32 addrspace(1)* %gep.out
   ret void
 }
 
@@ -308,3 +377,5 @@ define void @s_and_inline_high_imm_f32_n
   store i64 %and, i64 addrspace(1)* %out, align 8
   ret void
 }
+
+attributes #0 = { nounwind readnone }

Modified: llvm/trunk/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll?rev=248600&r1=248599&r2=248600&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/use-sgpr-multiple-times.ll Fri Sep 25 13:21:47 2015
@@ -2,6 +2,7 @@
 ; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=GCN %s
 
 declare float @llvm.fma.f32(float, float, float) #1
+declare double @llvm.fma.f64(double, double, double) #1
 declare float @llvm.fmuladd.f32(float, float, float) #1
 declare i32 @llvm.AMDGPU.imad24(i32, i32, i32) #1
 
@@ -99,5 +100,158 @@ define void @test_sgpr_use_twice_ternary
   ret void
 }
 
+; GCN-LABEL: {{^}}test_sgpr_use_twice_ternary_op_a_a_kimm:
+; GCN-DAG: s_load_dword [[SGPR:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
+; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[SGPR]], [[VK]]
+; GCN: buffer_store_dword [[RESULT]]
+define void @test_sgpr_use_twice_ternary_op_a_a_kimm(float addrspace(1)* %out, float %a) #0 {
+  %fma = call float @llvm.fma.f32(float %a, float %a, float 1024.0) #1
+  store float %fma, float addrspace(1)* %out, align 4
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_literal_use_twice_ternary_op_k_k_s:
+; GCN-DAG: s_load_dword [[SGPR:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
+; GCN: v_fma_f32 [[RESULT0:v[0-9]+]], [[VK]], [[VK]], [[SGPR]]
+; GCN: buffer_store_dword [[RESULT0]]
+define void @test_literal_use_twice_ternary_op_k_k_s(float addrspace(1)* %out, float %a) #0 {
+  %fma = call float @llvm.fma.f32(float 1024.0, float 1024.0, float %a) #1
+  store float %fma, float addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_literal_use_twice_ternary_op_k_k_s_x2:
+; GCN-DAG: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
+; GCN-DAG: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
+; GCN-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], [[VK]], [[VK]], [[SGPR0]]
+; GCN-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[VK]], [[VK]], [[SGPR1]]
+; GCN: buffer_store_dword [[RESULT0]]
+; GCN: buffer_store_dword [[RESULT1]]
+; GCN: s_endpgm
+define void @test_literal_use_twice_ternary_op_k_k_s_x2(float addrspace(1)* %out, float %a, float %b) #0 {
+  %fma0 = call float @llvm.fma.f32(float 1024.0, float 1024.0, float %a) #1
+  %fma1 = call float @llvm.fma.f32(float 1024.0, float 1024.0, float %b) #1
+  store volatile float %fma0, float addrspace(1)* %out
+  store volatile float %fma1, float addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_literal_use_twice_ternary_op_k_s_k:
+; GCN-DAG: s_load_dword [[SGPR:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
+; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[VK]], [[VK]]
+; GCN: buffer_store_dword [[RESULT]]
+define void @test_literal_use_twice_ternary_op_k_s_k(float addrspace(1)* %out, float %a) #0 {
+  %fma = call float @llvm.fma.f32(float 1024.0, float %a, float 1024.0) #1
+  store float %fma, float addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_literal_use_twice_ternary_op_k_s_k_x2:
+; GCN-DAG: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
+; GCN-DAG: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
+; GCN-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], [[SGPR0]], [[VK]], [[VK]]
+; GCN-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[SGPR1]], [[VK]], [[VK]]
+; GCN: buffer_store_dword [[RESULT0]]
+; GCN: buffer_store_dword [[RESULT1]]
+; GCN: s_endpgm
+define void @test_literal_use_twice_ternary_op_k_s_k_x2(float addrspace(1)* %out, float %a, float %b) #0 {
+  %fma0 = call float @llvm.fma.f32(float 1024.0, float %a, float 1024.0) #1
+  %fma1 = call float @llvm.fma.f32(float 1024.0, float %b, float 1024.0) #1
+  store volatile float %fma0, float addrspace(1)* %out
+  store volatile float %fma1, float addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_literal_use_twice_ternary_op_s_k_k:
+; GCN-DAG: s_load_dword [[SGPR:s[0-9]+]]
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
+; GCN: v_fma_f32 [[RESULT:v[0-9]+]], [[SGPR]], [[VK]], [[VK]]
+; GCN: buffer_store_dword [[RESULT]]
+define void @test_literal_use_twice_ternary_op_s_k_k(float addrspace(1)* %out, float %a) #0 {
+  %fma = call float @llvm.fma.f32(float %a, float 1024.0, float 1024.0) #1
+  store float %fma, float addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_literal_use_twice_ternary_op_s_k_k_x2:
+; GCN-DAG: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
+; GCN-DAG: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN-DAG: v_mov_b32_e32 [[VK:v[0-9]+]], 0x44800000
+; GCN-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], [[SGPR0]], [[VK]], [[VK]]
+; GCN-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[SGPR1]], [[VK]], [[VK]]
+; GCN: buffer_store_dword [[RESULT0]]
+; GCN: buffer_store_dword [[RESULT1]]
+; GCN: s_endpgm
+define void @test_literal_use_twice_ternary_op_s_k_k_x2(float addrspace(1)* %out, float %a, float %b) #0 {
+  %fma0 = call float @llvm.fma.f32(float %a, float 1024.0, float 1024.0) #1
+  %fma1 = call float @llvm.fma.f32(float %b, float 1024.0, float 1024.0) #1
+  store volatile float %fma0, float addrspace(1)* %out
+  store volatile float %fma1, float addrspace(1)* %out
+  ret void
+}
+
+; GCN-LABEL: {{^}}test_s0_s1_k_f32:
+; GCN-DAG: s_load_dword [[SGPR0:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
+; GCN-DAG: s_load_dword [[SGPR1:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0xc|0x30}}
+; GCN-DAG: v_mov_b32_e32 [[VK0:v[0-9]+]], 0x44800000
+
+; FIXME: Why do we end up with 2 copies of the same SGPR? These should be CSE'd
+; GCN: v_mov_b32_e32 [[VS1_1:v[0-9]+]], [[SGPR1]]
+; GCN: v_mov_b32_e32 [[VS1_0:v[0-9]+]], [[SGPR1]]
+
+; GCN-DAG: v_fma_f32 [[RESULT0:v[0-9]+]], [[SGPR0]], [[VS1_0]], [[VK0]]
+; GCN-DAG: v_mov_b32_e32 [[VK1:v[0-9]+]], 0x45800000
+; GCN-DAG: v_fma_f32 [[RESULT1:v[0-9]+]], [[SGPR0]], [[VS1_1]], [[VK1]]
+
+; GCN: buffer_store_dword [[RESULT0]]
+; GCN: buffer_store_dword [[RESULT1]]
+define void @test_s0_s1_k_f32(float addrspace(1)* %out, float %a, float %b) #0 {
+  %fma0 = call float @llvm.fma.f32(float %a, float %b, float 1024.0) #1
+  %fma1 = call float @llvm.fma.f32(float %a, float %b, float 4096.0) #1
+  store volatile float %fma0, float addrspace(1)* %out
+  store volatile float %fma1, float addrspace(1)* %out
+  ret void
+}
+
+; FIXME: Immediate in SGPRs just copied to VGPRs
+; GCN-LABEL: {{^}}test_s0_s1_k_f64:
+; GCN-DAG: s_load_dwordx2 [[SGPR0:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, {{0xb|0x2c}}
+; GCN-DAG: s_load_dwordx2 s{{\[}}[[SGPR1_SUB0:[0-9]+]]:[[SGPR1_SUB1:[0-9]+]]{{\]}}, s{{\[[0-9]+:[0-9]+\]}}, {{0xd|0x34}}
+; GCN-DAG: s_mov_b32 s[[SK0_SUB1:[0-9]+]], 0x40900000
+; GCN-DAG: s_mov_b32 s[[SZERO:[0-9]+]], 0{{$}}
+; GCN-DAG: v_mov_b32_e32 v[[VK0_SUB0:[0-9]+]], s[[SZERO]]
+; GCN-DAG: v_mov_b32_e32 v[[VK0_SUB1:[0-9]+]], s[[SK0_SUB1]]
+
+; GCN-DAG: s_mov_b32 s[[SK1_SUB0:[0-9]+]], 0x40b00000{{$}}
+
+; FIXME: Redundant copies
+; GCN: v_mov_b32_e32 v[[VS1_1_SUB0:[0-9]+]], s[[SGPR1_SUB0]]
+; GCN: v_mov_b32_e32 v[[VS1_1_SUB1:[0-9]+]], s[[SGPR1_SUB1]]
+; GCN: v_mov_b32_e32 v[[VS1_0_SUB0:[0-9]+]], s[[SGPR1_SUB0]]
+; GCN: v_mov_b32_e32 v[[VS1_0_SUB1:[0-9]+]], s[[SGPR1_SUB1]]
+
+
+; GCN-DAG: v_fma_f64 [[RESULT0:v\[[0-9]+:[0-9]+\]]], [[SGPR0]], v{{\[}}[[VS1_0_SUB0]]:[[VS1_0_SUB1]]{{\]}}, v{{\[}}[[VK0_SUB0]]:[[VK0_SUB1]]{{\]}}
+
+; GCN-DAG: v_mov_b32_e32 v[[VK1_SUB0:[0-9]+]], s[[SZERO]]
+; GCN-DAG: v_mov_b32_e32 v[[VK1_SUB1:[0-9]+]], s[[SK1_SUB0]]
+
+; GCN-DAG: v_fma_f64 [[RESULT1:v\[[0-9]+:[0-9]+\]]], [[SGPR0]], v{{\[}}[[VS1_1_SUB0]]:[[VS1_1_SUB1]]{{\]}}, v{{\[}}[[VK1_SUB0]]:[[VK1_SUB1]]{{\]}}
+
+; GCN: buffer_store_dwordx2 [[RESULT0]]
+; GCN: buffer_store_dwordx2 [[RESULT1]]
+define void @test_s0_s1_k_f64(double addrspace(1)* %out, double %a, double %b) #0 {
+  %fma0 = call double @llvm.fma.f64(double %a, double %b, double 1024.0) #1
+  %fma1 = call double @llvm.fma.f64(double %a, double %b, double 4096.0) #1
+  store volatile double %fma0, double addrspace(1)* %out
+  store volatile double %fma1, double addrspace(1)* %out
+  ret void
+}
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }




More information about the llvm-commits mailing list