[llvm] d844384 - [AMDGPU] fneg-combines.ll - regenerate test checks
Simon Pilgrim via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 10 02:24:41 PDT 2025
Author: Simon Pilgrim
Date: 2025-07-10T10:23:09+01:00
New Revision: d844384ad90aac733d0429dd25b6640570599caa
URL: https://github.com/llvm/llvm-project/commit/d844384ad90aac733d0429dd25b6640570599caa
DIFF: https://github.com/llvm/llvm-project/commit/d844384ad90aac733d0429dd25b6640570599caa.diff
LOG: [AMDGPU] fneg-combines.ll - regenerate test checks
Added:
Modified:
llvm/test/CodeGen/AMDGPU/fneg-combines.ll
Removed:
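
The refreshed assertions come from the UTC script referenced in the test header (utils/update_llc_test_checks.py, UTC_ARGS --version 5). As a sketch, assuming a current llc build on PATH and the file path from the Modified list above, the checks can be regenerated from the repository root with:

    llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AMDGPU/fneg-combines.ll
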
################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
index 14c18df6d8e41..ba34e9245f39c 100644
--- a/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
+++ b/llvm/test/CodeGen/AMDGPU/fneg-combines.ll
@@ -1,23 +1,108 @@
-; RUN: llc -mtriple=amdgcn -mcpu=hawaii -start-before=amdgpu-unify-divergent-exit-nodes -mattr=+flat-for-global < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-SAFE,SI %s
-; RUN: llc -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=hawaii -mattr=+flat-for-global -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-NSZ,SI %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn -mcpu=hawaii -start-before=amdgpu-unify-divergent-exit-nodes -mattr=+flat-for-global < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-SAFE,SI,SI-SAFE %s
+; RUN: llc -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=hawaii -mattr=+flat-for-global -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-NSZ,SI,SI-NSZ %s
-; RUN: llc -mtriple=amdgcn -mcpu=fiji -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-SAFE,VI %s
-; RUN: llc -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-NSZ,VI %s
+; RUN: llc -mtriple=amdgcn -mcpu=fiji -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-SAFE,VI,VI-SAFE %s
+; RUN: llc -enable-no-signed-zeros-fp-math -mtriple=amdgcn -mcpu=fiji -start-before=amdgpu-unify-divergent-exit-nodes < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-NSZ,VI,VI-NSZ %s
; --------------------------------------------------------------------------------
; fadd tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_add_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-
-; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
-
-; GCN-NSZ: v_sub_f32_e64 [[RESULT:v[0-9]+]], -[[A]], [[B]]
-; GCN-NSZ-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_add_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_add_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: flat_load_dword v5, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_add_f32_e32 v2, v5, v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_add_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: flat_load_dword v5, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_sub_f32_e64 v2, -v5, v2
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_add_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: flat_load_dword v5, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_add_f32_e32 v2, v5, v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_add_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: flat_load_dword v5, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_sub_f32_e64 v2, -v5, v2
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -31,16 +116,58 @@ define amdgpu_kernel void @v_fneg_add_f32(ptr addrspace(1) %out, ptr addrspace(1
ret void
}
-; GCN-LABEL: {{^}}v_fneg_add_store_use_add_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_add_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_ADD]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_add_store_use_add_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_add_store_use_add_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_f32_e32 v2, v4, v2
+; SI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_add_store_use_add_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_f32_e32 v2, v4, v2
+; VI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -55,22 +182,112 @@ define amdgpu_kernel void @v_fneg_add_store_use_add_f32(ptr addrspace(1) %out, p
ret void
}
-; GCN-LABEL: {{^}}v_fneg_add_multi_use_add_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-
-; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN-SAFE: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
-; GCN-SAFE: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[ADD]]
-
-; GCN-NSZ: v_sub_f32_e64 [[NEG_ADD:v[0-9]+]], -[[A]], [[B]]
-; GCN-NSZ-NEXT: v_mul_f32_e32 [[MUL:v[0-9]+]], -4.0, [[NEG_ADD]]
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_ADD]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_add_multi_use_add_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_add_multi_use_add_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: v_add_f32_e32 v2, v4, v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; SI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_add_multi_use_add_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: v_sub_f32_e64 v2, -v4, v2
+; SI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_add_multi_use_add_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: v_add_f32_e32 v2, v4, v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; VI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_add_multi_use_add_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: v_sub_f32_e64 v2, -v4, v2
+; VI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -86,17 +303,100 @@ define amdgpu_kernel void @v_fneg_add_multi_use_add_f32(ptr addrspace(1) %out, p
ret void
}
-; GCN-LABEL: {{^}}v_fneg_add_fneg_x_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-
-; GCN-SAFE: v_sub_f32_e32
-; GCN-SAFE: v_xor_b32_e32 [[ADD:v[0-9]+]], 0x80000000,
-
-; GCN-NSZ: v_sub_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_add_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_add_fneg_x_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_sub_f32_e32 v0, v1, v0
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_add_fneg_x_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_sub_f32_e32 v2, v0, v1
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_add_fneg_x_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_sub_f32_e32 v0, v1, v0
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_add_fneg_x_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_sub_f32_e32 v2, v0, v1
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -111,16 +411,100 @@ define amdgpu_kernel void @v_fneg_add_fneg_x_f32(ptr addrspace(1) %out, ptr addr
ret void
}
-; GCN-LABEL: {{^}}v_fneg_add_x_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-
-; GCN-SAFE: v_sub_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
-
-; GCN-NSZ: v_sub_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
-; GCN-NSZ: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_add_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_add_x_fneg_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_sub_f32_e32 v0, v0, v1
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_add_x_fneg_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_sub_f32_e32 v2, v1, v0
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_add_x_fneg_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_sub_f32_e32 v0, v0, v1
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_add_x_fneg_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_sub_f32_e32 v2, v1, v0
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -135,16 +519,100 @@ define amdgpu_kernel void @v_fneg_add_x_fneg_f32(ptr addrspace(1) %out, ptr addr
ret void
}
-; GCN-LABEL: {{^}}v_fneg_add_fneg_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-
-; GCN-SAFE: v_sub_f32_e64 [[ADD:v[0-9]+]], -[[A]], [[B]]
-; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
-
-; GCN-NSZ: v_add_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN-NSZ: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_add_fneg_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_add_fneg_fneg_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_sub_f32_e64 v0, -v0, v1
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_add_fneg_fneg_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_add_f32_e32 v2, v0, v1
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_add_fneg_fneg_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_sub_f32_e64 v0, -v0, v1
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_add_fneg_fneg_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_add_f32_e32 v2, v0, v1
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -160,21 +628,112 @@ define amdgpu_kernel void @v_fneg_add_fneg_fneg_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; GCN-LABEL: {{^}}v_fneg_add_store_use_fneg_x_f32:
-; GCN-DAG: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-
-; GCN-SAFE: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN-SAFE: v_sub_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
-; GCN-SAFE: v_xor_b32_e32 [[NEG_ADD:v[0-9]+]], 0x80000000, [[ADD]]
-
-; GCN-NSZ-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN-NSZ-DAG: v_sub_f32_e32 [[NEG_ADD:v[0-9]+]], [[A]], [[B]]
-; GCN-NSZ-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_ADD]]
-; GCN-NSZ-NEXT: s_waitcnt vmcnt(0)
-; GCN-NSZ-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
-; GCN-NSZ-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_add_store_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_add_store_use_fneg_x_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: v_xor_b32_e32 v3, 0x80000000, v4
+; SI-SAFE-NEXT: v_sub_f32_e32 v2, v2, v4
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_add_store_use_fneg_x_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: v_xor_b32_e32 v3, 0x80000000, v4
+; SI-NSZ-NEXT: v_sub_f32_e32 v2, v4, v2
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_add_store_use_fneg_x_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SAFE-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: v_xor_b32_e32 v3, 0x80000000, v4
+; VI-SAFE-NEXT: v_sub_f32_e32 v2, v2, v4
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_add_store_use_fneg_x_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NSZ-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: v_xor_b32_e32 v3, 0x80000000, v4
+; VI-NSZ-NEXT: v_sub_f32_e32 v2, v4, v2
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -190,21 +749,120 @@ define amdgpu_kernel void @v_fneg_add_store_use_fneg_x_f32(ptr addrspace(1) %out
ret void
}
-; GCN-LABEL: {{^}}v_fneg_add_multi_use_fneg_x_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-
-; GCN-SAFE-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
-; GCN-SAFE-DAG: v_sub_f32_e32 [[ADD:v[0-9]+]], [[B]], [[A]]
-; GCN-SAFE-DAG: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[ADD]]
-
-; GCN-NSZ-DAG: v_sub_f32_e32 [[NEG_ADD:v[0-9]+]], [[A]], [[B]]
-; GCN-NSZ-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
-; GCN-NSZ-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_ADD]]
-; GCN-NSZ-NEXT: s_waitcnt vmcnt(0)
-; GCN-NSZ-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
-; GCN-NSZ-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_add_multi_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, float %c) #0 {
+; SI-SAFE-LABEL: v_fneg_add_multi_use_fneg_x_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SAFE-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s6, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_load_dword s2, s[4:5], 0xf
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mul_f32_e64 v3, -v4, s2
+; SI-SAFE-NEXT: v_sub_f32_e32 v2, v2, v4
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_add_multi_use_fneg_x_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NSZ-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s6, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_load_dword s2, s[4:5], 0xf
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mul_f32_e64 v3, -v4, s2
+; SI-NSZ-NEXT: v_sub_f32_e32 v2, v4, v2
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_add_multi_use_fneg_x_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SAFE-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s6, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_load_dword s2, s[4:5], 0x3c
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mul_f32_e64 v3, -v4, s2
+; VI-SAFE-NEXT: v_sub_f32_e32 v2, v2, v4
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_add_multi_use_fneg_x_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NSZ-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s6, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_load_dword s2, s[4:5], 0x3c
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mul_f32_e64 v3, -v4, s2
+; VI-NSZ-NEXT: v_sub_f32_e32 v2, v4, v2
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -222,15 +880,102 @@ define amdgpu_kernel void @v_fneg_add_multi_use_fneg_x_f32(ptr addrspace(1) %out
}
; This one asserted with -enable-no-signed-zeros-fp-math
-; GCN-LABEL: {{^}}fneg_fadd_0:
-; GCN-SAFE-DAG: v_mad_f32 [[A:v[0-9]+]],
-; GCN-SAFE-DAG: v_cmp_ngt_f32_e32 {{.*}}, [[A]]
-; GCN-SAFE-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, -[[A]]
-
-; GCN-NSZ-DAG: v_mul_f32_e32 v{{[0-9]+}}, 0, v
-; GCN-NSZ: v_cmp_ngt_f32
-; GCN-NSZ: v_cndmask_b32_e64 v{{[0-9]+}}, -v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_ps float @fneg_fadd_0(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) local_unnamed_addr #0 {
+; SI-SAFE-LABEL: fneg_fadd_0:
+; SI-SAFE: ; %bb.0: ; %.entry
+; SI-SAFE-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0
+; SI-SAFE-NEXT: v_rcp_f32_e32 v1, v0
+; SI-SAFE-NEXT: v_div_scale_f32 v2, vcc, 1.0, s1, 1.0
+; SI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
+; SI-SAFE-NEXT: v_fma_f32 v3, -v0, v1, 1.0
+; SI-SAFE-NEXT: v_fma_f32 v1, v3, v1, v1
+; SI-SAFE-NEXT: v_mul_f32_e32 v3, v2, v1
+; SI-SAFE-NEXT: v_fma_f32 v4, -v0, v3, v2
+; SI-SAFE-NEXT: v_fma_f32 v3, v4, v1, v3
+; SI-SAFE-NEXT: v_fma_f32 v0, -v0, v3, v2
+; SI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
+; SI-SAFE-NEXT: v_div_fmas_f32 v0, v0, v1, v3
+; SI-SAFE-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0
+; SI-SAFE-NEXT: v_mad_f32 v0, v0, 0, 0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s0
+; SI-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0
+; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000
+; SI-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; SI-SAFE-NEXT: ; return to shader part epilog
+;
+; SI-NSZ-LABEL: fneg_fadd_0:
+; SI-NSZ: ; %bb.0: ; %.entry
+; SI-NSZ-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0
+; SI-NSZ-NEXT: v_rcp_f32_e32 v1, v0
+; SI-NSZ-NEXT: v_div_scale_f32 v2, vcc, 1.0, s1, 1.0
+; SI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
+; SI-NSZ-NEXT: v_fma_f32 v3, -v0, v1, 1.0
+; SI-NSZ-NEXT: v_fma_f32 v1, v3, v1, v1
+; SI-NSZ-NEXT: v_mul_f32_e32 v3, v2, v1
+; SI-NSZ-NEXT: v_fma_f32 v4, -v0, v3, v2
+; SI-NSZ-NEXT: v_fma_f32 v3, v4, v1, v3
+; SI-NSZ-NEXT: v_fma_f32 v0, -v0, v3, v2
+; SI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
+; SI-NSZ-NEXT: v_div_fmas_f32 v0, v0, v1, v3
+; SI-NSZ-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0
+; SI-NSZ-NEXT: v_mul_f32_e32 v0, 0, v0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s0
+; SI-NSZ-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0
+; SI-NSZ-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000
+; SI-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; SI-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; SI-NSZ-NEXT: ; return to shader part epilog
+;
+; VI-SAFE-LABEL: fneg_fadd_0:
+; VI-SAFE: ; %bb.0: ; %.entry
+; VI-SAFE-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0
+; VI-SAFE-NEXT: v_div_scale_f32 v1, vcc, 1.0, s1, 1.0
+; VI-SAFE-NEXT: v_rcp_f32_e32 v2, v0
+; VI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
+; VI-SAFE-NEXT: v_fma_f32 v3, -v0, v2, 1.0
+; VI-SAFE-NEXT: v_fma_f32 v2, v3, v2, v2
+; VI-SAFE-NEXT: v_mul_f32_e32 v3, v1, v2
+; VI-SAFE-NEXT: v_fma_f32 v4, -v0, v3, v1
+; VI-SAFE-NEXT: v_fma_f32 v3, v4, v2, v3
+; VI-SAFE-NEXT: v_fma_f32 v0, -v0, v3, v1
+; VI-SAFE-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
+; VI-SAFE-NEXT: v_div_fmas_f32 v0, v0, v2, v3
+; VI-SAFE-NEXT: v_mov_b32_e32 v2, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000
+; VI-SAFE-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0
+; VI-SAFE-NEXT: v_mad_f32 v0, v0, 0, 0
+; VI-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0
+; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, -v0, v2, vcc
+; VI-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; VI-SAFE-NEXT: ; return to shader part epilog
+;
+; VI-NSZ-LABEL: fneg_fadd_0:
+; VI-NSZ: ; %bb.0: ; %.entry
+; VI-NSZ-NEXT: v_div_scale_f32 v0, s[2:3], s1, s1, 1.0
+; VI-NSZ-NEXT: v_div_scale_f32 v1, vcc, 1.0, s1, 1.0
+; VI-NSZ-NEXT: v_rcp_f32_e32 v2, v0
+; VI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
+; VI-NSZ-NEXT: v_fma_f32 v3, -v0, v2, 1.0
+; VI-NSZ-NEXT: v_fma_f32 v2, v3, v2, v2
+; VI-NSZ-NEXT: v_mul_f32_e32 v3, v1, v2
+; VI-NSZ-NEXT: v_fma_f32 v4, -v0, v3, v1
+; VI-NSZ-NEXT: v_fma_f32 v3, v4, v2, v3
+; VI-NSZ-NEXT: v_fma_f32 v0, -v0, v3, v1
+; VI-NSZ-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
+; VI-NSZ-NEXT: v_div_fmas_f32 v0, v0, v2, v3
+; VI-NSZ-NEXT: v_mov_b32_e32 v2, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000
+; VI-NSZ-NEXT: v_div_fixup_f32 v0, v0, s1, 1.0
+; VI-NSZ-NEXT: v_mul_f32_e32 v0, 0, v0
+; VI-NSZ-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0
+; VI-NSZ-NEXT: v_cndmask_b32_e64 v0, -v0, v2, vcc
+; VI-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; VI-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; VI-NSZ-NEXT: ; return to shader part epilog
.entry:
%tmp7 = fdiv float 1.000000e+00, %tmp6
%tmp8 = fmul float 0.000000e+00, %tmp7
@@ -247,16 +992,31 @@ define amdgpu_ps float @fneg_fadd_0(float inreg %tmp2, float inreg %tmp6, <4 x i
; This is a workaround because -enable-no-signed-zeros-fp-math does not set up
; function attribute unsafe-fp-math automatically. Combine with the previous test
; when that is done.
-; GCN-LABEL: {{^}}fneg_fadd_0_nsz:
-; GCN-NSZ-DAG: v_rcp_f32_e32 [[A:v[0-9]+]],
-; GCN-NSZ-DAG: v_mov_b32_e32 [[B:v[0-9]+]],
-; GCN-NSZ-DAG: v_mov_b32_e32 [[C:v[0-9]+]], 0x7fc00000
-; GCN-NSZ-DAG: v_mul_f32_e32 [[D:v[0-9]+]], 0, [[A]]
-; GCN-NSZ-DAG: v_cmp_ngt_f32_e32 {{.*}}, s{{[0-9]+}}, [[D]]
-; GCN-NSZ-DAG: v_cndmask_b32_e64 [[E:v[0-9]+]], -[[D]], v{{[0-9]+}},
-; GCN-NSZ-DAG: v_cmp_nlt_f32_e32 {{.*}}, 0
-; GCN-NSZ-DAG: v_cndmask_b32_e64 v{{[0-9]+}}, [[C]], 0,
define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4 x i32> %arg) local_unnamed_addr #2 {
+; GCN-SAFE-LABEL: fneg_fadd_0_nsz:
+; GCN-SAFE: ; %bb.0: ; %.entry
+; GCN-SAFE-NEXT: v_rcp_f32_e32 v0, s1
+; GCN-SAFE-NEXT: v_mov_b32_e32 v1, s0
+; GCN-SAFE-NEXT: v_mul_f32_e32 v0, 0, v0
+; GCN-SAFE-NEXT: v_add_f32_e32 v0, 0, v0
+; GCN-SAFE-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0
+; GCN-SAFE-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc
+; GCN-SAFE-NEXT: v_mov_b32_e32 v1, 0x7fc00000
+; GCN-SAFE-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; GCN-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; GCN-SAFE-NEXT: ; return to shader part epilog
+;
+; GCN-NSZ-LABEL: fneg_fadd_0_nsz:
+; GCN-NSZ: ; %bb.0: ; %.entry
+; GCN-NSZ-NEXT: v_rcp_f32_e32 v0, s1
+; GCN-NSZ-NEXT: v_mov_b32_e32 v1, s0
+; GCN-NSZ-NEXT: v_mul_f32_e32 v0, 0, v0
+; GCN-NSZ-NEXT: v_cmp_ngt_f32_e32 vcc, s0, v0
+; GCN-NSZ-NEXT: v_cndmask_b32_e64 v0, -v0, v1, vcc
+; GCN-NSZ-NEXT: v_mov_b32_e32 v1, 0x7fc00000
+; GCN-NSZ-NEXT: v_cmp_nlt_f32_e32 vcc, 0, v0
+; GCN-NSZ-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; GCN-NSZ-NEXT: ; return to shader part epilog
.entry:
%tmp7 = fdiv afn float 1.000000e+00, %tmp6
%tmp8 = fmul float 0.000000e+00, %tmp7
@@ -274,12 +1034,52 @@ define amdgpu_ps float @fneg_fadd_0_nsz(float inreg %tmp2, float inreg %tmp6, <4
; fmul tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_mul_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v5, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e64 v2, v5, -v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v5, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e64 v2, v5, -v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -293,14 +1093,58 @@ define amdgpu_kernel void @v_fneg_mul_f32(ptr addrspace(1) %out, ptr addrspace(1
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_store_use_mul_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_MUL:v[0-9]+]], 0x80000000, [[ADD]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_mul_store_use_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_store_use_mul_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_mul_f32_e32 v2, v4, v2
+; SI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_store_use_mul_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_mul_f32_e32 v2, v4, v2
+; VI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -315,17 +1159,58 @@ define amdgpu_kernel void @v_fneg_mul_store_use_mul_f32(ptr addrspace(1) %out, p
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_multi_use_mul_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e64 [[MUL0:v[0-9]+]], [[A]], -[[B]]
-; GCN-NEXT: v_mul_f32_e32 [[MUL1:v[0-9]+]], -4.0, [[MUL0]]
-
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL0]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_multi_use_mul_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_mul_f32_e64 v2, v4, -v2
+; SI-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_multi_use_mul_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_mul_f32_e64 v2, v4, -v2
+; VI-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -341,12 +1226,52 @@ define amdgpu_kernel void @v_fneg_mul_multi_use_mul_f32(ptr addrspace(1) %out, p
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_fneg_x_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_fneg_x_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_f32_e32 v2, v0, v1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_fneg_x_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mul_f32_e32 v2, v0, v1
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -361,12 +1286,52 @@ define amdgpu_kernel void @v_fneg_mul_fneg_x_f32(ptr addrspace(1) %out, ptr addr
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_x_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_x_fneg_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_f32_e32 v2, v0, v1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_x_fneg_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mul_f32_e32 v2, v0, v1
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -381,12 +1346,52 @@ define amdgpu_kernel void @v_fneg_mul_x_fneg_f32(ptr addrspace(1) %out, ptr addr
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_fneg_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_fneg_fneg_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_f32_e64 v2, v0, -v1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_fneg_fneg_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mul_f32_e64 v2, v0, -v1
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -402,15 +1407,58 @@ define amdgpu_kernel void @v_fneg_mul_fneg_fneg_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_store_use_fneg_x_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_MUL:v[0-9]+]], [[A]], [[B]]
-
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_store_use_fneg_x_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_xor_b32_e32 v3, 0x80000000, v4
+; SI-NEXT: v_mul_f32_e32 v2, v4, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_store_use_fneg_x_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_xor_b32_e32 v3, 0x80000000, v4
+; VI-NEXT: v_mul_f32_e32 v2, v4, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -426,14 +1474,62 @@ define amdgpu_kernel void @v_fneg_mul_store_use_fneg_x_f32(ptr addrspace(1) %out
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_multi_use_fneg_x_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_MUL:v[0-9]+]], [[A]], [[B]]
-; GCN-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
define amdgpu_kernel void @v_fneg_mul_multi_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_mul_multi_use_fneg_x_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s7
+; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0xf
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mul_f32_e64 v3, -v4, s2
+; SI-NEXT: v_mul_f32_e32 v2, v4, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_multi_use_fneg_x_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s7
+; VI-NEXT: v_add_u32_e32 v2, vcc, s6, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x3c
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mul_f32_e64 v3, -v4, s2
+; VI-NEXT: v_mul_f32_e32 v2, v4, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -454,14 +1550,56 @@ define amdgpu_kernel void @v_fneg_mul_multi_use_fneg_x_f32(ptr addrspace(1) %out
; fminnum tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_minnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_A:v[0-9]+]], -1.0, [[A]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_B:v[0-9]+]], -1.0, [[B]]
-; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_minnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v5, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v3, -1.0, v5
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT: v_max_f32_e32 v2, v3, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_minnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v5, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v3, -1.0, v5
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT: v_max_f32_e32 v2, v3, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -475,23 +1613,52 @@ define amdgpu_kernel void @v_fneg_minnum_f32_ieee(ptr addrspace(1) %out, ptr add
ret void
}
-; GCN-LABEL: {{^}}v_fneg_minnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN-NOT: v1
-; GCN: v_max_f32_e64 v0, -v0, -v1
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_minnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_minnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_max_f32_e64 v0, -v0, -v1
+; GCN-NEXT: ; return to shader part epilog
%min = call float @llvm.minnum.f32(float %a, float %b)
%fneg = fneg float %min
ret float %fneg
}
-; GCN-LABEL: {{^}}v_fneg_self_minnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_A:v[0-9]+]], -1.0, [[A]]
-; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_self_minnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_max_f32_e32 v2, v2, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_self_minnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: v_max_f32_e32 v2, v2, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -503,22 +1670,52 @@ define amdgpu_kernel void @v_fneg_self_minnum_f32_ieee(ptr addrspace(1) %out, pt
ret void
}
-; GCN-LABEL: {{^}}v_fneg_self_minnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN: v_max_f32_e64 v0, -v0, -v0
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_self_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_self_minnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_max_f32_e64 v0, -v0, -v0
+; GCN-NEXT: ; return to shader part epilog
%min = call float @llvm.minnum.f32(float %a, float %a)
%min.fneg = fneg float %min
ret float %min.fneg
}
-; GCN-LABEL: {{^}}v_fneg_posk_minnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[QUIET_NEG_A:v[0-9]+]], -1.0, [[A]]
-; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], -4.0, [[QUIET_NEG_A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_posk_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_posk_minnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_max_f32_e32 v2, -4.0, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_posk_minnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: v_max_f32_e32 v2, -4.0, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -530,22 +1727,52 @@ define amdgpu_kernel void @v_fneg_posk_minnum_f32_ieee(ptr addrspace(1) %out, pt
ret void
}
-; GCN-LABEL: {{^}}v_fneg_posk_minnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN: v_max_f32_e64 v0, -v0, -4.0
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_posk_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_posk_minnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_max_f32_e64 v0, -v0, -4.0
+; GCN-NEXT: ; return to shader part epilog
%min = call float @llvm.minnum.f32(float 4.0, float %a)
%fneg = fneg float %min
ret float %fneg
}
-; GCN-LABEL: {{^}}v_fneg_negk_minnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[QUIET_NEG_A:v[0-9]+]], -1.0, [[A]]
-; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], 4.0, [[QUIET_NEG_A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_negk_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_negk_minnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_max_f32_e32 v2, 4.0, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_negk_minnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: v_max_f32_e32 v2, 4.0, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -557,23 +1784,52 @@ define amdgpu_kernel void @v_fneg_negk_minnum_f32_ieee(ptr addrspace(1) %out, pt
ret void
}
-; GCN-LABEL: {{^}}v_fneg_negk_minnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN: v_max_f32_e64 v0, -v0, 4.0
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_negk_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_negk_minnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_max_f32_e64 v0, -v0, 4.0
+; GCN-NEXT: ; return to shader part epilog
%min = call float @llvm.minnum.f32(float -4.0, float %a)
%fneg = fneg float %min
ret float %fneg
}
-; GCN-LABEL: {{^}}v_fneg_0_minnum_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-NOT: [[A]]
-; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], 0, [[A]]
-; GCN: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[MIN]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_0_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_0_minnum_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_min_f32_e32 v2, 0, v3
+; SI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_0_minnum_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_min_f32_e32 v2, 0, v3
+; VI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -585,12 +1841,42 @@ define amdgpu_kernel void @v_fneg_0_minnum_f32(ptr addrspace(1) %out, ptr addrsp
ret void
}
-; GCN-LABEL: {{^}}v_fneg_neg0_minnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[QUIET_NEG_A:v[0-9]+]], -1.0, [[A]]
-; GCN: v_max_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_NEG_A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_neg0_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg0_minnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_max_f32_e32 v2, 0, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_neg0_minnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: v_max_f32_e32 v2, 0, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -602,18 +1888,43 @@ define amdgpu_kernel void @v_fneg_neg0_minnum_f32_ieee(ptr addrspace(1) %out, pt
ret void
}
-; GCN-LABEL: {{^}}v_fneg_inv2pi_minnum_f32:
-; GCN-DAG: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-
-; SI-DAG: v_mul_f32_e32 [[QUIET_NEG:v[0-9]+]], -1.0, [[A]]
-; SI: v_max_f32_e32 [[RESULT:v[0-9]+]], 0xbe22f983, [[QUIET_NEG]]
-
-; VI: v_mul_f32_e32 [[QUIET:v[0-9]+]], 1.0, [[A]]
-; VI: v_min_f32_e32 [[MAX:v[0-9]+]], 0.15915494, [[QUIET]]
-; VI: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[MAX]]
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_max_f32_e32 v2, 0xbe22f983, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, 1.0, v3
+; VI-NEXT: v_min_f32_e32 v2, 0.15915494, v2
+; VI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -625,17 +1936,42 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr a
ret void
}
-; GCN-LABEL: {{^}}v_fneg_neg_inv2pi_minnum_f32:
-; GCN-DAG: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-
-; SI: v_mul_f32_e32 [[NEG_QUIET:v[0-9]+]], -1.0, [[A]]
-; SI: v_max_f32_e32 [[RESULT:v[0-9]+]], 0x3e22f983, [[NEG_QUIET]]
-
-; VI: v_mul_f32_e32 [[NEG_QUIET:v[0-9]+]], -1.0, [[A]]
-; VI: v_max_f32_e32 [[RESULT:v[0-9]+]], 0.15915494, [[NEG_QUIET]]
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg_inv2pi_minnum_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_max_f32_e32 v2, 0x3e22f983, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_neg_inv2pi_minnum_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: v_max_f32_e32 v2, 0.15915494, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -647,19 +1983,44 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f32(ptr addrspace(1) %out, p
ret void
}
-; GCN-LABEL: {{^}}v_fneg_inv2pi_minnum_f16:
-; GCN-DAG: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
-
-; SI: v_cvt_f32_f16_e64 [[CVT:v[0-9]+]], -[[A]]
-; SI: v_max_f32_e32 [[MAX:v[0-9]+]], 0xbe230000, [[CVT]]
-; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[MAX]]
-
-; VI: v_max_f16_e32 [[QUIET:v[0-9]+]], [[A]], [[A]]
-; VI: v_min_f16_e32 [[MAX:v[0-9]+]], 0.15915494, [[QUIET]]
-; VI: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x8000, [[MAX]]
-
-; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 1, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_ushort v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_cvt_f32_f16_e64 v0, -v0
+; SI-NEXT: v_max_f32_e32 v0, 0xbe230000, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_store_short v[0:1], v3
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 1, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_ushort v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_max_f16_e32 v2, v3, v3
+; VI-NEXT: v_min_f16_e32 v2, 0.15915494, v2
+; VI-NEXT: v_xor_b32_e32 v2, 0x8000, v2
+; VI-NEXT: flat_store_short v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -671,18 +2032,43 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr a
ret void
}
-; GCN-LABEL: {{^}}v_fneg_neg_inv2pi_minnum_f16:
-; GCN-DAG: {{buffer|flat}}_load_ushort [[A:v[0-9]+]]
-
-; SI: v_cvt_f32_f16_e64 [[CVT:v[0-9]+]], -[[A]]
-; SI: v_max_f32_e32 [[MAX:v[0-9]+]], 0x3e230000, [[CVT]]
-; SI: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[MAX]]
-
-; VI: v_max_f16_e64 [[NEG_QUIET:v[0-9]+]], -[[A]], -[[A]]
-; VI: v_max_f16_e32 [[RESULT:v[0-9]+]], 0.15915494, [[NEG_QUIET]]
-
-; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg_inv2pi_minnum_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 1, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_ushort v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_cvt_f32_f16_e64 v0, -v0
+; SI-NEXT: v_max_f32_e32 v0, 0x3e230000, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_store_short v[0:1], v3
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_neg_inv2pi_minnum_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 1, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_ushort v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_max_f16_e64 v2, -v3, -v3
+; VI-NEXT: v_max_f16_e32 v2, 0.15915494, v2
+; VI-NEXT: flat_store_short v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -694,19 +2080,45 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f16(ptr addrspace(1) %out, p
ret void
}
-; GCN-LABEL: {{^}}v_fneg_inv2pi_minnum_f64:
-; GCN-DAG: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
-
-; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0xbfc45f30
-; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0x6dc9c882
-; SI-DAG: v_max_f64 [[NEG_QUIET:v\[[0-9]+:[0-9]+\]]], -[[A]], -[[A]]
-; SI: v_max_f64 v[[[RESULT_LO:[0-9]+]]:[[RESULT_HI:[0-9]+]]], [[NEG_QUIET]], s[[[K_LO]]:[[K_HI]]]
-
-; VI: v_min_f64 v[[[RESULT_LO:[0-9]+]]:[[RESULT_HI:[0-9]+]]], [[A]], 0.15915494
-; VI: v_xor_b32_e32 v[[RESULT_HI]], 0x80000000, v[[RESULT_HI]]
-
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[RESULT_LO]]:[[RESULT_HI]]]
define amdgpu_kernel void @v_fneg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_f64:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_mov_b32 s2, 0x6dc9c882
+; SI-NEXT: s_mov_b32 s3, 0xbfc45f30
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_max_f64 v[0:1], -v[0:1], -v[0:1]
+; SI-NEXT: v_max_f64 v[0:1], v[0:1], s[2:3]
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_f64:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_max_f64 v[0:1], v[0:1], v[0:1]
+; VI-NEXT: v_min_f64 v[0:1], v[0:1], 0.15915494309189532
+; VI-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
+; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -718,19 +2130,44 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr a
ret void
}
-; GCN-LABEL: {{^}}v_fneg_neg_inv2pi_minnum_f64:
-; GCN-DAG: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
-
-; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0x3fc45f30
-; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0x6dc9c882
-; SI-DAG: v_max_f64 [[NEG_QUIET:v\[[0-9]+:[0-9]+\]]], -[[A]], -[[A]]
-; SI: v_max_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[NEG_QUIET]], s[[[K_LO]]:[[K_HI]]]
-
-; VI: v_max_f64 [[NEG_QUIET:v\[[0-9]+:[0-9]+\]]], -[[A]], -[[A]]
-; VI: v_max_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[NEG_QUIET]], 0.15915494
-
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg_inv2pi_minnum_f64:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_mov_b32 s2, 0x6dc9c882
+; SI-NEXT: s_mov_b32 s3, 0x3fc45f30
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_max_f64 v[0:1], -v[0:1], -v[0:1]
+; SI-NEXT: v_max_f64 v[0:1], v[0:1], s[2:3]
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_neg_inv2pi_minnum_f64:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_max_f64 v[0:1], -v[0:1], -v[0:1]
+; VI-NEXT: v_max_f64 v[0:1], v[0:1], 0.15915494309189532
+; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -742,24 +2179,66 @@ define amdgpu_kernel void @v_fneg_neg_inv2pi_minnum_f64(ptr addrspace(1) %out, p
ret void
}
-; GCN-LABEL: {{^}}v_fneg_neg0_minnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN: v_max_f32_e64 v0, -v0, 0{{$}}
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_neg0_minnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_neg0_minnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_max_f32_e64 v0, -v0, 0
+; GCN-NEXT: ; return to shader part epilog
%min = call float @llvm.minnum.f32(float -0.0, float %a)
%fneg = fneg float %min
ret float %fneg
}
-; GCN-LABEL: {{^}}v_fneg_0_minnum_foldable_use_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[QUIET_A:v[0-9]+]], 1.0, [[A]]
-; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], 0, [[QUIET_A]]
-; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], [[B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_0_minnum_foldable_use_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_0_minnum_foldable_use_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; SI-NEXT: v_min_f32_e32 v2, 0, v2
+; SI-NEXT: v_mul_f32_e64 v2, -v2, v3
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_0_minnum_foldable_use_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; VI-NEXT: v_min_f32_e32 v2, 0, v2
+; VI-NEXT: v_mul_f32_e64 v2, -v2, v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -774,21 +2253,56 @@ define amdgpu_kernel void @v_fneg_0_minnum_foldable_use_f32_ieee(ptr addrspace(1
ret void
}
-; GCN-LABEL: {{^}}v_fneg_inv2pi_minnum_foldable_use_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-
-; SI: v_mul_f32_e32 [[QUIET_NEG:v[0-9]+]], -1.0, [[A]]
-
-; SI: v_max_f32_e32 [[MIN:v[0-9]+]], 0xbe22f983, [[QUIET_NEG]]
-; SI: v_mul_f32_e32 [[RESULT:v[0-9]+]], [[MIN]], [[B]]
-
-; VI: v_mul_f32_e32 [[QUIET:v[0-9]+]], 1.0, [[A]]
-; VI: v_min_f32_e32 [[MIN:v[0-9]+]], 0.15915494, [[QUIET]]
-; VI: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], [[B]]
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_inv2pi_minnum_foldable_use_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_inv2pi_minnum_foldable_use_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v4
+; SI-NEXT: v_max_f32_e32 v2, 0xbe22f983, v2
+; SI-NEXT: v_mul_f32_e32 v2, v2, v3
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_inv2pi_minnum_foldable_use_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; VI-NEXT: v_min_f32_e32 v2, 0.15915494, v2
+; VI-NEXT: v_mul_f32_e64 v2, -v2, v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -803,31 +2317,74 @@ define amdgpu_kernel void @v_fneg_inv2pi_minnum_foldable_use_f32(ptr addrspace(1
ret void
}
-; GCN-LABEL: {{^}}v_fneg_0_minnum_foldable_use_f32_no_ieee:
-; GCN-NOT: v0
-; GCN-NOT: v1
-; GCN: v_min_f32_e32 [[MIN:v[0-9]+]], 0, v0
-; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MIN]], v1
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_0_minnum_foldable_use_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_0_minnum_foldable_use_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_min_f32_e32 v0, 0, v0
+; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT: ; return to shader part epilog
%min = call float @llvm.minnum.f32(float 0.0, float %a)
%fneg = fneg float %min
%mul = fmul float %fneg, %b
ret float %mul
}
-; GCN-LABEL: {{^}}v_fneg_minnum_multi_use_minnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_A:v[0-9]+]], -1.0, [[A]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_B:v[0-9]+]], -1.0, [[B]]
-; GCN: v_max_f32_e32 [[MAX0:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
-; GCN-NEXT: v_mul_f32_e32 [[MUL1:v[0-9]+]], -4.0, [[MAX0]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MAX0]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_minnum_multi_use_minnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_minnum_multi_use_minnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_mul_f32_e32 v3, -1.0, v4
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT: v_max_f32_e32 v2, v3, v2
+; SI-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_minnum_multi_use_minnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_mul_f32_e32 v3, -1.0, v4
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT: v_max_f32_e32 v2, v3, v2
+; VI-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -843,13 +2400,12 @@ define amdgpu_kernel void @v_fneg_minnum_multi_use_minnum_f32_ieee(ptr addrspace
ret void
}
-; GCN-LABEL: {{^}}v_fneg_minnum_multi_use_minnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN-NOT: v1
-; GCN: v_max_f32_e64 v0, -v0, -v1
-; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
-; GCN-NEXT: ; return
define amdgpu_ps <2 x float> @v_fneg_minnum_multi_use_minnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_minnum_multi_use_minnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_max_f32_e64 v0, -v0, -v1
+; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
+; GCN-NEXT: ; return to shader part epilog
%min = call float @llvm.minnum.f32(float %a, float %b)
%fneg = fneg float %min
%use1 = fmul float %min, 4.0
@@ -862,15 +2418,56 @@ define amdgpu_ps <2 x float> @v_fneg_minnum_multi_use_minnum_f32_no_ieee(float %
; fmaxnum tests
; --------------------------------------------------------------------------------
-
-; GCN-LABEL: {{^}}v_fneg_maxnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_A:v[0-9]+]], -1.0, [[A]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_B:v[0-9]+]], -1.0, [[B]]
-; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_maxnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v5, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v3, -1.0, v5
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT: v_min_f32_e32 v2, v3, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_maxnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v5, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v3, -1.0, v5
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT: v_min_f32_e32 v2, v3, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -884,23 +2481,52 @@ define amdgpu_kernel void @v_fneg_maxnum_f32_ieee(ptr addrspace(1) %out, ptr add
ret void
}
-; GCN-LABEL: {{^}}v_fneg_maxnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN-NOT: v1
-; GCN: v_min_f32_e64 v0, -v0, -v1
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_maxnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_maxnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_min_f32_e64 v0, -v0, -v1
+; GCN-NEXT: ; return to shader part epilog
%max = call float @llvm.maxnum.f32(float %a, float %b)
%fneg = fneg float %max
ret float %fneg
}
-; GCN-LABEL: {{^}}v_fneg_self_maxnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_A:v[0-9]+]], -1.0, [[A]]
-; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_self_maxnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_min_f32_e32 v2, v2, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_self_maxnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: v_min_f32_e32 v2, v2, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -912,22 +2538,52 @@ define amdgpu_kernel void @v_fneg_self_maxnum_f32_ieee(ptr addrspace(1) %out, pt
ret void
}
-; GCN-LABEL: {{^}}v_fneg_self_maxnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN: v_min_f32_e64 v0, -v0, -v0
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_self_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_self_maxnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_min_f32_e64 v0, -v0, -v0
+; GCN-NEXT: ; return to shader part epilog
%max = call float @llvm.maxnum.f32(float %a, float %a)
%max.fneg = fneg float %max
ret float %max.fneg
}
-; GCN-LABEL: {{^}}v_fneg_posk_maxnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[QUIET_NEG_A:v[0-9]+]], -1.0, [[A]]
-; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], -4.0, [[QUIET_NEG_A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_posk_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_posk_maxnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_min_f32_e32 v2, -4.0, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_posk_maxnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: v_min_f32_e32 v2, -4.0, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -939,22 +2595,52 @@ define amdgpu_kernel void @v_fneg_posk_maxnum_f32_ieee(ptr addrspace(1) %out, pt
ret void
}
-; GCN-LABEL: {{^}}v_fneg_posk_maxnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN: v_min_f32_e64 v0, -v0, -4.0
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_posk_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_posk_maxnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_min_f32_e64 v0, -v0, -4.0
+; GCN-NEXT: ; return to shader part epilog
%max = call float @llvm.maxnum.f32(float 4.0, float %a)
%fneg = fneg float %max
ret float %fneg
}
-; GCN-LABEL: {{^}}v_fneg_negk_maxnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[QUIET_NEG_A:v[0-9]+]], -1.0, [[A]]
-; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], 4.0, [[QUIET_NEG_A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_negk_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_negk_maxnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_min_f32_e32 v2, 4.0, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_negk_maxnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: v_min_f32_e32 v2, 4.0, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -966,23 +2652,52 @@ define amdgpu_kernel void @v_fneg_negk_maxnum_f32_ieee(ptr addrspace(1) %out, pt
ret void
}
-; GCN-LABEL: {{^}}v_fneg_negk_maxnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN: v_min_f32_e64 v0, -v0, 4.0
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_negk_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_negk_maxnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_min_f32_e64 v0, -v0, 4.0
+; GCN-NEXT: ; return to shader part epilog
%max = call float @llvm.maxnum.f32(float -4.0, float %a)
%fneg = fneg float %max
ret float %fneg
}
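; Same combine as the positive-constant pair above with the constant's sign
; flipped, using -maxnum(x, y) == minnum(-x, -y).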
-; GCN-LABEL: {{^}}v_fneg_0_maxnum_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-NOT: [[A]]
-; GCN: v_max_f32_e32 [[MAX:v[0-9]+]], 0, [[A]]
-; GCN: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[MAX]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_0_maxnum_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_0_maxnum_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_max_f32_e32 v2, 0, v3
+; SI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_0_maxnum_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_max_f32_e32 v2, 0, v3
+; VI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -994,12 +2709,42 @@ define amdgpu_kernel void @v_fneg_0_maxnum_f32(ptr addrspace(1) %out, ptr addrsp
ret void
}
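; With a +0.0 operand the combine is skipped in both safe and nsz modes: the
; max is computed first and the fneg remains an explicit sign-bit xor with
; 0x80000000. Compare the -0.0 case below, which does fold to a min.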
-; GCN-LABEL: {{^}}v_fneg_neg0_maxnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[QUIET_NEG_A:v[0-9]+]], -1.0, [[A]]
-; GCN: v_min_f32_e32 [[RESULT:v[0-9]+]], 0, [[QUIET_NEG_A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_neg0_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_neg0_maxnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: v_min_f32_e32 v2, 0, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_neg0_maxnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: v_min_f32_e32 v2, 0, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1011,24 +2756,66 @@ define amdgpu_kernel void @v_fneg_neg0_maxnum_f32_ieee(ptr addrspace(1) %out, pt
ret void
}
-; GCN-LABEL: {{^}}v_fneg_neg0_maxnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN: v_min_f32_e64 v0, -v0, 0{{$}}
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_neg0_maxnum_f32_no_ieee(float %a) #0 {
+; GCN-LABEL: v_fneg_neg0_maxnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_min_f32_e64 v0, -v0, 0
+; GCN-NEXT: ; return to shader part epilog
%max = call float @llvm.maxnum.f32(float -0.0, float %a)
%fneg = fneg float %max
ret float %fneg
}
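; With -0.0 the fold fires: fneg(maxnum(-0.0, %a)) becomes minnum(0, -%a), and
; 0 is an inline constant. The IEEE kernel still quiets/negates with
; v_mul_f32 -1.0 first; the non-IEEE shader needs only the single v_min with a
; negated source modifier.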
-; GCN-LABEL: {{^}}v_fneg_0_maxnum_foldable_use_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[QUIET_A:v[0-9]+]], 1.0, [[A]]
-; GCN: v_max_f32_e32 [[MAX:v[0-9]+]], 0, [[QUIET_A]]
-; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MAX]], [[B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_0_maxnum_foldable_use_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_0_maxnum_foldable_use_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; SI-NEXT: v_max_f32_e32 v2, 0, v2
+; SI-NEXT: v_mul_f32_e64 v2, -v2, v3
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_0_maxnum_foldable_use_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; VI-NEXT: v_max_f32_e32 v2, 0, v2
+; VI-NEXT: v_mul_f32_e64 v2, -v2, v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1043,31 +2830,74 @@ define amdgpu_kernel void @v_fneg_0_maxnum_foldable_use_f32_ieee(ptr addrspace(1
ret void
}
-; GCN-LABEL: {{^}}v_fneg_0_maxnum_foldable_use_f32_no_ieee:
-; GCN-NOT: v0
-; GCN-NOT: v1
-; GCN: v_max_f32_e32 [[MAX:v[0-9]+]], 0, v0
-; GCN: v_mul_f32_e64 [[RESULT:v[0-9]+]], -[[MAX]], v1
-; GCN-NEXT: ; return
define amdgpu_ps float @v_fneg_0_maxnum_foldable_use_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_0_maxnum_foldable_use_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_max_f32_e32 v0, 0, v0
+; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT: ; return to shader part epilog
%max = call float @llvm.maxnum.f32(float 0.0, float %a)
%fneg = fneg float %max
%mul = fmul float %fneg, %b
ret float %mul
}
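; When the fneg's only user can absorb the negation, it is pushed into that
; user instead of the max: IEEE mode quiets %a with a v_mul_f32 by 1.0, takes
; the max with 0, and the negation rides for free on the v_mul_f32_e64's
; source modifier. No xor is emitted in either mode.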
-; GCN-LABEL: {{^}}v_fneg_maxnum_multi_use_maxnum_f32_ieee:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_A:v[0-9]+]], -1.0, [[A]]
-; GCN-DAG: v_mul_f32_e32 [[NEG_QUIET_B:v[0-9]+]], -1.0, [[B]]
-; GCN: v_min_f32_e32 [[MAX0:v[0-9]+]], [[NEG_QUIET_A]], [[NEG_QUIET_B]]
-; GCN-NEXT: v_mul_f32_e32 [[MUL1:v[0-9]+]], -4.0, [[MAX0]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MAX0]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_maxnum_multi_use_maxnum_f32_ieee(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_ieee:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_mul_f32_e32 v3, -1.0, v4
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v2
+; SI-NEXT: v_min_f32_e32 v2, v3, v2
+; SI-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_ieee:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_mul_f32_e32 v3, -1.0, v4
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v2
+; VI-NEXT: v_min_f32_e32 v2, v3, v2
+; VI-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1083,13 +2913,12 @@ define amdgpu_kernel void @v_fneg_maxnum_multi_use_maxnum_f32_ieee(ptr addrspace
ret void
}
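; With two uses of the max, both modes negate the operands up front (v_mul_f32
; by -1.0 under IEEE, source modifiers otherwise) and compute v_min directly;
; the second use is rewritten to multiply by -4.0 instead of 4.0 so both users
; share the one negated value.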
-; GCN-LABEL: {{^}}v_fneg_maxnum_multi_use_maxnum_f32_no_ieee:
-; GCN-NOT: v0
-; GCN-NOT: v1
-; GCN: v_min_f32_e64 v0, -v0, -v1
-; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
-; GCN-NEXT: ; return
define amdgpu_ps <2 x float> @v_fneg_maxnum_multi_use_maxnum_f32_no_ieee(float %a, float %b) #0 {
+; GCN-LABEL: v_fneg_maxnum_multi_use_maxnum_f32_no_ieee:
+; GCN: ; %bb.0:
+; GCN-NEXT: v_min_f32_e64 v0, -v0, -v1
+; GCN-NEXT: v_mul_f32_e32 v1, -4.0, v0
+; GCN-NEXT: ; return to shader part epilog
%max = call float @llvm.maxnum.f32(float %a, float %b)
%fneg = fneg float %max
%use1 = fmul float %max, 4.0
@@ -1102,17 +2931,116 @@ define amdgpu_ps <2 x float> @v_fneg_maxnum_multi_use_maxnum_f32_no_ieee(float %
; fma tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_fma_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-SAFE: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], [[B]], [[C]]
-; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[RESULT]]
-
-; GCN-NSZ: v_fma_f32 [[RESULT:v[0-9]+]], [[A]], -[[B]], -[[C]]
-; GCN-NSZ-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_fma_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fma_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s6, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v7, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s0, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_fma_f32 v2, v7, v2, v3
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fma_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s6, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v7, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s0, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_fma_f32 v2, v7, -v2, -v3
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fma_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s6, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v7, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s0, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_fma_f32 v2, v7, v2, v3
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fma_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s6, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v7, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s0, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_fma_f32 v2, v7, -v2, -v3
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1128,17 +3056,66 @@ define amdgpu_kernel void @v_fneg_fma_f32(ptr addrspace(1) %out, ptr addrspace(1
ret void
}
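; Safe mode keeps the fma and flips the sign bit afterwards. With
; -enable-no-signed-zeros-fp-math the negation distributes into the operands,
; -(a*b + c) == a*(-b) + (-c), so the v_xor disappears.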
-; GCN-LABEL: {{^}}v_fneg_fma_store_use_fma_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN-DAG: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], [[C]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_FMA:v[0-9]+]], 0x80000000, [[FMA]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_FMA]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_fma_store_use_fma_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: v_fneg_fma_store_use_fma_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s7
+; SI-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_fma_f32 v2, v6, v2, v3
+; SI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fma_store_use_fma_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v5, s7
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_fma_f32 v2, v6, v2, v3
+; VI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1155,23 +3132,128 @@ define amdgpu_kernel void @v_fneg_fma_store_use_fma_f32(ptr addrspace(1) %out, p
ret void
}
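; Here both the fma result and its negation are stored, so there is nothing to
; fold even under nsz: both run configurations emit the same fma plus one
; v_xor to materialize the negated copy.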
-; GCN-LABEL: {{^}}v_fneg_fma_multi_use_fma_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-SAFE: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], [[C]]
-; GCN-SAFE: v_xor_b32_e32 [[NEG_FMA:v[0-9]+]], 0x80000000, [[FMA]]
-; GCN-SAFE: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[FMA]]
-
-; GCN-NSZ: v_fma_f32 [[NEG_FMA:v[0-9]+]], [[A]], -[[B]], -[[C]]
-; GCN-NSZ-NEXT: v_mul_f32_e32 [[MUL:v[0-9]+]], -4.0, [[NEG_FMA]]
-
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_FMA]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_fma_multi_use_fma_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fma_multi_use_fma_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: v_fma_f32 v2, v6, v2, v3
+; SI-SAFE-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; SI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fma_multi_use_fma_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: v_fma_f32 v2, v6, -v2, -v3
+; SI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fma_multi_use_fma_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: v_fma_f32 v2, v6, v2, v3
+; VI-SAFE-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; VI-SAFE-NEXT: v_mul_f32_e32 v2, 4.0, v2
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fma_multi_use_fma_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: v_fma_f32 v2, v6, -v2, -v3
+; VI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1189,18 +3271,116 @@ define amdgpu_kernel void @v_fneg_fma_multi_use_fma_f32(ptr addrspace(1) %out, p
ret void
}
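; With the fma result also feeding a multiply, safe mode pays for the v_xor and
; multiplies the original value by 4.0, while nsz negates the fma operands and
; compensates in the user by multiplying by -4.0.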
-; GCN-LABEL: {{^}}v_fneg_fma_fneg_x_y_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-SAFE: v_fma_f32 [[FMA:v[0-9]+]], -[[A]], [[B]], [[C]]
-; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[FMA]]
-
-; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], -[[C]]
-; GCN-NSZ-NOT: [[FMA]]
-; GCN-NSZ: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
define amdgpu_kernel void @v_fneg_fma_fneg_x_y_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fma_fneg_x_y_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_fma_f32 v0, -v0, v1, v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fma_fneg_x_y_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_fma_f32 v2, v0, v1, -v2
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fma_fneg_x_y_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_fma_f32 v0, -v0, v1, v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fma_fneg_x_y_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_fma_f32 v2, v0, v1, -v2
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1217,18 +3397,116 @@ define amdgpu_kernel void @v_fneg_fma_fneg_x_y_f32(ptr addrspace(1) %out, ptr ad
ret void
}
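; The inner fneg on %a meets the outer one: under nsz, -((-a)*b + c) == a*b - c,
; a single fma with one negated source. Safe mode keeps fma(-a, b, c) and xors
; the result.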
-; GCN-LABEL: {{^}}v_fneg_fma_x_fneg_y_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-SAFE: v_fma_f32 [[FMA:v[0-9]+]], [[A]], -[[B]], [[C]]
-; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[FMA]]
-
-; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], -[[C]]
-; GCN-NSZ-NOT: [[FMA]]
-; GCN-NSZ: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
define amdgpu_kernel void @v_fneg_fma_x_fneg_y_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fma_x_fneg_y_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s6, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s4, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v1, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_fma_f32 v0, v0, -v1, v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fma_x_fneg_y_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s6, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s4, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v1, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_fma_f32 v2, v0, v1, -v2
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fma_x_fneg_y_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s6, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s4, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v1, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_fma_f32 v0, v0, -v1, v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fma_x_fneg_y_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s6, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s4, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v1, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_fma_f32 v2, v0, v1, -v2
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1245,18 +3523,116 @@ define amdgpu_kernel void @v_fneg_fma_x_fneg_y_f32(ptr addrspace(1) %out, ptr ad
ret void
}
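; Mirror of the previous test with the fneg on %b: nsz reaches the same
; a*b - c form, since -(a*(-b) + c) == a*b - c.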
-; GCN-LABEL: {{^}}v_fneg_fma_fneg_fneg_y_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-SAFE: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], [[C]]
-; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[FMA]]
-
-; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], -[[B]], -[[C]]
-; GCN-NSZ-NOT: [[FMA]]
-; GCN-NSZ: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
define amdgpu_kernel void @v_fneg_fma_fneg_fneg_y_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fma_fneg_fneg_y_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_fma_f32 v0, v0, v1, v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fma_fneg_fneg_y_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_fma_f32 v2, v0, -v1, -v2
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fma_fneg_fneg_y_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_fma_f32 v0, v0, v1, v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fma_fneg_fneg_y_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_fma_f32 v2, v0, -v1, -v2
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1274,18 +3650,116 @@ define amdgpu_kernel void @v_fneg_fma_fneg_fneg_y_f32(ptr addrspace(1) %out, ptr
ret void
}
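; The two inner fnegs cancel each other ((-a)*(-b) == a*b), so safe mode emits
; a plain fma plus the sign-bit xor; nsz folds the outer fneg as well, giving
; fma(a, -b, -c).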
-; GCN-LABEL: {{^}}v_fneg_fma_fneg_x_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-SAFE: v_fma_f32 [[FMA:v[0-9]+]], -[[A]], [[B]], -[[C]]
-; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[FMA]]
-
-; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], [[C]]
-; GCN-NSZ-NOT: [[FMA]]
-; GCN-NSZ: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
define amdgpu_kernel void @v_fneg_fma_fneg_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fma_fneg_x_fneg_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_fma_f32 v0, -v0, v1, -v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fma_fneg_x_fneg_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_fma_f32 v2, v0, v1, v2
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fma_fneg_x_fneg_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_fma_f32 v0, -v0, v1, -v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fma_fneg_x_fneg_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_fma_f32 v2, v0, v1, v2
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1303,18 +3777,116 @@ define amdgpu_kernel void @v_fneg_fma_fneg_x_fneg_f32(ptr addrspace(1) %out, ptr
ret void
}
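; With fnegs on both %a and %c everything cancels under nsz,
; -((-a)*b + (-c)) == a*b + c, and the negation costs nothing at all; safe mode
; still carries the source modifiers plus a v_xor.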
-; GCN-LABEL: {{^}}v_fneg_fma_x_y_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-NSZ-SAFE: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], -[[C]]
-; GCN-NSZ-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[FMA]]
-
-; GCN-NSZ: v_fma_f32 [[FMA:v[0-9]+]], [[A]], -[[B]], [[C]]
-; GCN-NSZ-NOT: [[FMA]]
-; GCN-NSZ: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
define amdgpu_kernel void @v_fneg_fma_x_y_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fma_x_y_fneg_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_fma_f32 v0, v0, v1, -v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fma_x_y_fneg_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_fma_f32 v2, v0, -v1, v2
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fma_x_y_fneg_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_fma_f32 v0, v0, v1, -v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v0
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fma_x_y_fneg_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_fma_f32 v2, v0, -v1, v2
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1331,24 +3903,128 @@ define amdgpu_kernel void @v_fneg_fma_x_y_fneg_f32(ptr addrspace(1) %out, ptr ad
ret void
}
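; nsz folds -(a*b + (-c)) to a*(-b) + c, again one fma with a source modifier.
; Note the old checks for this test used a GCN-NSZ-SAFE prefix that no RUN line
; defines, so the safe path was effectively unchecked before this regeneration.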
-; GCN-LABEL: {{^}}v_fneg_fma_store_use_fneg_x_y_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-SAFE: v_xor_b32
-; GCN-SAFE: v_fma_f32 [[FMA:v[0-9]+]], -[[A]],
-; GCN-SAFE: v_xor_b32
-
-; GCN-NSZ-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN-NSZ-DAG: v_fma_f32 [[FMA:v[0-9]+]], [[A]], [[B]], -[[C]]
-
-; GCN-NSZ-NOT: [[FMA]]
-; GCN-NSZ-NOT: [[NEG_A]]
-; GCN-NSZ: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA]]
-; GCN-NSZ-NOT: [[NEG_A]]
-; GCN-NSZ: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
define amdgpu_kernel void @v_fneg_fma_store_use_fneg_x_y_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fma_store_use_fneg_x_y_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: v_xor_b32_e32 v4, 0x80000000, v6
+; SI-SAFE-NEXT: v_fma_f32 v2, -v6, v2, v3
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v4
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fma_store_use_fneg_x_y_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: v_xor_b32_e32 v4, 0x80000000, v6
+; SI-NSZ-NEXT: v_fma_f32 v2, v6, v2, -v3
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v4
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fma_store_use_fneg_x_y_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: v_xor_b32_e32 v4, 0x80000000, v6
+; VI-SAFE-NEXT: v_fma_f32 v2, -v6, v2, v3
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v4
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fma_store_use_fneg_x_y_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: v_xor_b32_e32 v4, 0x80000000, v6
+; VI-NSZ-NEXT: v_fma_f32 v2, v6, v2, -v3
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v4
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1366,21 +4042,132 @@ define amdgpu_kernel void @v_fneg_fma_store_use_fneg_x_y_f32(ptr addrspace(1) %o
ret void
}
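; Hedged sketch of the pattern the checks above exercise (the IR names are
; illustrative, not quoted from the elided body):
;   %fneg.a = fneg float %a
;   %fma = call float @llvm.fma.f32(float %fneg.a, float %b, float %c)
;   %r = fneg float %fma           ; stored, along with %fneg.a itself
; Storing %fneg.a forces the v_xor_b32 of 0x80000000 in both modes, but under
; -enable-no-signed-zeros-fp-math the outer fneg still folds through the fma,
; giving the single v_fma_f32 v2, v6, v2, -v3 (i.e. fma(a, b, -c)) in the NSZ
; runs instead of the SAFE runs' fma plus a second sign-bit xor.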
-; GCN-LABEL: {{^}}v_fneg_fma_multi_use_fneg_x_y_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
-; GCN-SAFE-DAG: v_fma_f32 [[FMA:v[0-9]+]]
-; GCN-SAFE-DAG: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[FMA]]
-
-; GCN-NSZ-DAG: v_fma_f32 [[NEG_FMA:v[0-9]+]], [[A]], [[B]], -[[C]]
-; GCN-NSZ-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_FMA]]
-; GCN-NSZ-NEXT: s_waitcnt vmcnt(0)
-; GCN-NSZ-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
-; GCN-NSZ-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_fma_multi_use_fneg_x_y_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, float %d) #0 {
+; SI-SAFE-LABEL: v_fneg_fma_multi_use_fneg_x_y_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_load_dword s0, s[4:5], 0x11
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s11
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s10, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s13
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s12, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s15
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s14, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s8
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s9
+; SI-SAFE-NEXT: v_fma_f32 v2, -v6, v2, v3
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-SAFE-NEXT: v_mul_f32_e64 v3, -v6, s0
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fma_multi_use_fneg_x_y_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_load_dword s0, s[4:5], 0x11
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s11
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s10, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s13
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s12, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s15
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s14, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s8
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s9
+; SI-NSZ-NEXT: v_fma_f32 v2, v6, v2, -v3
+; SI-NSZ-NEXT: v_mul_f32_e64 v3, -v6, s0
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fma_multi_use_fneg_x_y_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_load_dword s0, s[4:5], 0x44
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s11
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s10, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s13
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s12, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s15
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s14, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s8
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s9
+; VI-SAFE-NEXT: v_fma_f32 v2, -v6, v2, v3
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-SAFE-NEXT: v_mul_f32_e64 v3, -v6, s0
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fma_multi_use_fneg_x_y_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_load_dword s0, s[4:5], 0x44
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s11
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s10, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s13
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s12, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s15
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s14, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s8
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s9
+; VI-NSZ-NEXT: v_fma_f32 v2, v6, v2, -v3
+; VI-NSZ-NEXT: v_mul_f32_e64 v3, -v6, s0
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1403,17 +4190,116 @@ define amdgpu_kernel void @v_fneg_fma_multi_use_fneg_x_y_f32(ptr addrspace(1) %o
; fmad tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_fmad_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-SAFE: v_mac_f32_e32 [[C]], [[A]], [[B]]
-; GCN-SAFE: v_xor_b32_e32 v{{[0-9]+}}, 0x80000000, [[C]]
-
-; GCN-NSZ: v_mad_f32 [[RESULT:v[0-9]+]], [[A]], -[[B]], -[[C]]
-; GCN-NSZ-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_fmad_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fmad_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s6, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v7, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s0, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mac_f32_e32 v3, v7, v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v3
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fmad_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s6, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v7, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s0, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mad_f32 v2, v7, -v2, -v3
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fmad_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s6, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v7, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s0, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mac_f32_e32 v3, v7, v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v3
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fmad_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s6, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v7, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s0, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mad_f32 v2, v7, -v2, -v3
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1429,13 +4315,134 @@ define amdgpu_kernel void @v_fneg_fmad_f32(ptr addrspace(1) %out, ptr addrspace(
ret void
}
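; The fold exercised above, stated as an identity (comment only, not a check):
;   fneg(fmad(a, b, c)) ==> fmad(a, -b, -c)
; It is only valid when signed zeros can be ignored, so the GCN-NSZ runs
; collapse to one v_mad_f32 with negate source modifiers while the GCN-SAFE
; runs keep the v_mac_f32 followed by an explicit v_xor_b32 of the sign bit.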
-; GCN-LABEL: {{^}}v_fneg_fmad_v4f32:
-
-; GCN-NSZ: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}, -v{{[0-9]+}}
-; GCN-NSZ: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}, -v{{[0-9]+}}
-; GCN-NSZ: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}, -v{{[0-9]+}}
-; GCN-NSZ: v_mad_f32 v{{[0-9]+}}, v{{[0-9]+}}, -v{{[0-9]+}}, -v{{[0-9]+}}
define amdgpu_kernel void @v_fneg_fmad_v4f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fmad_v4f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v12, 4, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v12
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v2, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s4, v12
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v2, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v2, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v8, vcc, s6, v12
+; SI-SAFE-NEXT: v_addc_u32_e32 v9, vcc, 0, v2, vcc
+; SI-SAFE-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dwordx4 v[4:7], v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dwordx4 v[8:11], v[8:9] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v13, s1
+; SI-SAFE-NEXT: v_add_i32_e32 v12, vcc, s0, v12
+; SI-SAFE-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; SI-SAFE-NEXT: v_mad_f32 v0, v0, v4, v8
+; SI-SAFE-NEXT: v_mad_f32 v1, v1, v5, v9
+; SI-SAFE-NEXT: v_mad_f32 v2, v2, v6, v10
+; SI-SAFE-NEXT: v_mac_f32_e32 v11, v3, v7
+; SI-SAFE-NEXT: v_xor_b32_e32 v3, 0x80000000, v11
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
+; SI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; SI-SAFE-NEXT: flat_store_dwordx4 v[12:13], v[0:3]
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fmad_v4f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v12, 4, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v12
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v2, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s4, v12
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v2, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v2, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v8, vcc, s6, v12
+; SI-NSZ-NEXT: v_addc_u32_e32 v9, vcc, 0, v2, vcc
+; SI-NSZ-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dwordx4 v[4:7], v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dwordx4 v[8:11], v[8:9] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v13, s1
+; SI-NSZ-NEXT: v_add_i32_e32 v12, vcc, s0, v12
+; SI-NSZ-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; SI-NSZ-NEXT: v_mad_f32 v3, v3, -v7, -v11
+; SI-NSZ-NEXT: v_mad_f32 v2, v2, -v6, -v10
+; SI-NSZ-NEXT: v_mad_f32 v1, v1, -v5, -v9
+; SI-NSZ-NEXT: v_mad_f32 v0, v0, -v4, -v8
+; SI-NSZ-NEXT: flat_store_dwordx4 v[12:13], v[0:3]
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fmad_v4f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v12, 4, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v12
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v2, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s4, v12
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v2, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v2, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v8, vcc, s6, v12
+; VI-SAFE-NEXT: v_addc_u32_e32 v9, vcc, 0, v2, vcc
+; VI-SAFE-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dwordx4 v[4:7], v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dwordx4 v[8:11], v[8:9] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v13, s1
+; VI-SAFE-NEXT: v_add_u32_e32 v12, vcc, s0, v12
+; VI-SAFE-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; VI-SAFE-NEXT: v_mad_f32 v0, v0, v4, v8
+; VI-SAFE-NEXT: v_mad_f32 v1, v1, v5, v9
+; VI-SAFE-NEXT: v_mad_f32 v2, v2, v6, v10
+; VI-SAFE-NEXT: v_mac_f32_e32 v11, v3, v7
+; VI-SAFE-NEXT: v_xor_b32_e32 v3, 0x80000000, v11
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v1, 0x80000000, v1
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; VI-SAFE-NEXT: flat_store_dwordx4 v[12:13], v[0:3]
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fmad_v4f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v12, 4, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v12
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v2, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s4, v12
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v2, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v2, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v8, vcc, s6, v12
+; VI-NSZ-NEXT: v_addc_u32_e32 v9, vcc, 0, v2, vcc
+; VI-NSZ-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dwordx4 v[4:7], v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dwordx4 v[8:11], v[8:9] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v13, s1
+; VI-NSZ-NEXT: v_add_u32_e32 v12, vcc, s0, v12
+; VI-NSZ-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
+; VI-NSZ-NEXT: v_mad_f32 v3, v3, -v7, -v11
+; VI-NSZ-NEXT: v_mad_f32 v2, v2, -v6, -v10
+; VI-NSZ-NEXT: v_mad_f32 v1, v1, -v5, -v9
+; VI-NSZ-NEXT: v_mad_f32 v0, v0, -v4, -v8
+; VI-NSZ-NEXT: flat_store_dwordx4 v[12:13], v[0:3]
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds <4 x float>, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1451,23 +4458,128 @@ define amdgpu_kernel void @v_fneg_fmad_v4f32(ptr addrspace(1) %out, ptr addrspac
ret void
}
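; Same identity as the scalar fmad case, applied per element: the <4 x float>
; operation is scalarized, so the NSZ runs show four v_mad_f32 instructions
; with -b/-c modifiers and the SAFE runs instead pay four v_xor_b32 sign
; flips on the unnegated results.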
-; GCN-LABEL: {{^}}v_fneg_fmad_multi_use_fmad_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN-SAFE: v_mac_f32_e32 [[C]], [[A]], [[B]]
-; GCN-SAFE: v_xor_b32_e32 [[NEG_MAD:v[0-9]+]], 0x80000000, [[C]]
-; GCN-SAFE-NEXT: v_mul_f32_e32 [[MUL:v[0-9]+]], 4.0, [[C]]
-
-; GCN-NSZ: v_mad_f32 [[NEG_MAD:v[0-9]+]], [[A]], -[[B]], -[[C]]
-; GCN-NSZ-NEXT: v_mul_f32_e32 [[MUL:v[0-9]+]], -4.0, [[NEG_MAD]]
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MAD]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_fmad_multi_use_fmad_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_fmad_multi_use_fmad_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: v_mac_f32_e32 v3, v6, v2
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v3
+; SI-SAFE-NEXT: v_mul_f32_e32 v3, 4.0, v3
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_fmad_multi_use_fmad_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: v_mad_f32 v2, v6, -v2, -v3
+; SI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_fmad_multi_use_fmad_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s5
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s7
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s0
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: v_mac_f32_e32 v3, v6, v2
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v3
+; VI-SAFE-NEXT: v_mul_f32_e32 v3, 4.0, v3
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_fmad_multi_use_fmad_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s5
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s7
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s0
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: v_mad_f32 v2, v6, -v2, -v3
+; VI-NSZ-NEXT: v_mul_f32_e32 v3, -4.0, v2
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1489,11 +4601,42 @@ define amdgpu_kernel void @v_fneg_fmad_multi_use_fmad_f32(ptr addrspace(1) %out,
; fp_extend tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_fp_extend_f32_to_f64:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_cvt_f64_f32_e64 [[RESULT:v\[[0-9]+:[0-9]+\]]], -[[A]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_fp_extend_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_extend_f32_to_f64:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v1, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_cvt_f64_f32_e64 v[0:1], -v1
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_extend_f32_to_f64:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v1, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_cvt_f64_f32_e64 v[0:1], -v1
+; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1505,11 +4648,42 @@ define amdgpu_kernel void @v_fneg_fp_extend_f32_to_f64(ptr addrspace(1) %out, pt
ret void
}
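; fneg folds through fp_extend: fneg(fpext x) ==> fpext(fneg x), and the
; inner fneg is then free on AMDGPU because v_cvt_f64_f32 accepts a negate
; source modifier (the v_cvt_f64_f32_e64 ..., -v1 above), so no v_xor_b32
; is emitted on either target.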
-; GCN-LABEL: {{^}}v_fneg_fp_extend_fneg_f32_to_f64:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]]
-; GCN: {{buffer|flat}}_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_extend_fneg_f32_to_f64:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v1, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_extend_fneg_f32_to_f64:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v1, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
+; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1522,13 +4696,50 @@ define amdgpu_kernel void @v_fneg_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %ou
ret void
}
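; Here the combine cancels the two negations, fneg(fpext(fneg x)) ==> fpext(x),
; leaving a plain v_cvt_f64_f32_e32 with no source modifier and no v_xor_b32.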
-; GCN-LABEL: {{^}}v_fneg_fp_extend_store_use_fneg_f32_to_f64:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: v_cvt_f64_f32_e32 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[A]]
-; GCN-DAG: v_xor_b32_e32 [[FNEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FNEG_A]]
define amdgpu_kernel void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_extend_store_use_fneg_f32_to_f64:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v4, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v4
+; SI-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_extend_store_use_fneg_f32_to_f64:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v4, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v4
+; VI-NEXT: v_xor_b32_e32 v4, 0x80000000, v4
+; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v4
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1542,13 +4753,52 @@ define amdgpu_kernel void @v_fneg_fp_extend_store_use_fneg_f32_to_f64(ptr addrsp
ret void
}
-; GCN-LABEL: {{^}}v_fneg_multi_use_fp_extend_fneg_f32_to_f64:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: v_cvt_f64_f32_e32 v[[[CVT_LO:[0-9]+]]:[[CVT_HI:[0-9]+]]], [[A]]
-; GCN-DAG: v_xor_b32_e32 v[[FNEG_A:[0-9]+]], 0x80000000, v[[CVT_HI]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[FNEG_A]]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[CVT_LO]]:[[CVT_HI]]]
define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_use_fp_extend_fneg_f32_to_f64:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v1, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
+; SI-NEXT: v_xor_b32_e32 v5, 0x80000000, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[4:5]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_multi_use_fp_extend_fneg_f32_to_f64:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v1, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
+; VI-NEXT: v_xor_b32_e32 v5, 0x80000000, v1
+; VI-NEXT: v_mov_b32_e32 v4, v0
+; VI-NEXT: flat_store_dwordx2 v[2:3], v[4:5]
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1561,14 +4811,53 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f32_to_f64(ptr addrsp
ret void
}
-; GCN-LABEL: {{^}}v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: v_cvt_f64_f32_e32 v[[[CVT_LO:[0-9]+]]:[[CVT_HI:[0-9]+]]], [[A]]
-; GCN-DAG: v_xor_b32_e32 v[[FNEG_A:[0-9]+]], 0x80000000, v[[CVT_HI]]
-; GCN-DAG: v_mul_f64 [[MUL:v\[[0-9]+:[0-9]+\]]], v[[[CVT_LO]]:[[CVT_HI]]], 4.0
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[FNEG_A]]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v1, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v2, 3, v0
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v2, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
+; SI-NEXT: v_xor_b32_e32 v5, 0x80000000, v1
+; SI-NEXT: v_mov_b32_e32 v4, v0
+; SI-NEXT: v_mul_f64 v[0:1], v[0:1], 4.0
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[4:5]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v1, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v6, s1
+; VI-NEXT: v_add_u32_e32 v5, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc
+; VI-NEXT: v_cvt_f64_f32_e32 v[1:2], v1
+; VI-NEXT: v_mul_f64 v[3:4], v[1:2], 4.0
+; VI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT: flat_store_dwordx2 v[5:6], v[1:2]
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dwordx2 v[5:6], v[3:4]
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1583,8 +4872,50 @@ define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f32_to_f64(p
}
; FIXME: Source modifiers not folded for f16->f32
-; GCN-LABEL: {{^}}v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_ushort v1, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_cvt_f32_f16_e64 v4, -v1
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v1
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT: flat_store_dword v[0:1], v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_multi_use_fp_extend_fneg_f16_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v1, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_cvt_f32_f16_e32 v3, v1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT: v_xor_b32_e32 v2, 0x80000000, v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1597,8 +4928,52 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_extend_fneg_f16_to_f32(ptr addrsp
ret void
}
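; This appears to be the case the FIXME above refers to: the SI run still
; folds a negate into the conversion (v_cvt_f32_f16_e64 v4, -v1, at the cost
; of a second plain cvt for the other use), while the VI run extends once and
; negates with a separate v_xor_b32, i.e. the source modifier is not folded
; for the f16->f32 extend there.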
-; GCN-LABEL: {{^}}v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_ushort v1, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v1
+; SI-NEXT: v_cvt_f32_f16_e64 v4, -v1
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT: v_mul_f32_e32 v2, 4.0, v3
+; SI-NEXT: flat_store_dword v[0:1], v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 1, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_ushort v1, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_cvt_f32_f16_e32 v3, v1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT: v_xor_b32_e32 v2, 0x80000000, v3
+; VI-NEXT: v_mul_f32_e32 v3, 4.0, v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds half, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1616,11 +4991,42 @@ define amdgpu_kernel void @v_fneg_multi_foldable_use_fp_extend_fneg_f16_to_f32(p
; fp_round tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_fp_round_f64_to_f32:
-; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_cvt_f32_f64_e64 [[RESULT:v[0-9]+]], -[[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_fp_round_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_f64_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_cvt_f32_f64_e64 v2, -v[1:2]
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_f64_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_cvt_f32_f64_e64 v2, -v[1:2]
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1632,11 +5038,42 @@ define amdgpu_kernel void @v_fneg_fp_round_f64_to_f32(ptr addrspace(1) %out, ptr
ret void
}
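; The mirror of the fp_extend case: fneg(fptrunc x) ==> fptrunc(fneg x),
; absorbed by the negate source modifier on v_cvt_f32_f64 (the
; v_cvt_f32_f64_e64 v2, -v[1:2] above), so again no separate v_xor_b32 is
; needed.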
-; GCN-LABEL: {{^}}v_fneg_fp_round_fneg_f64_to_f32:
-; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
-; GCN: v_cvt_f32_f64_e32 [[RESULT:v[0-9]+]], [[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_fneg_f64_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_cvt_f32_f64_e32 v2, v[1:2]
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_fneg_f64_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_cvt_f32_f64_e32 v2, v[1:2]
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1649,13 +5086,50 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out
ret void
}
-; GCN-LABEL: {{^}}v_fneg_fp_round_store_use_fneg_f64_to_f32:
-; GCN: {{buffer|flat}}_load_dwordx2 v[[[A_LO:[0-9]+]]:[[A_HI:[0-9]+]]]
-; GCN-DAG: v_cvt_f32_f64_e32 [[RESULT:v[0-9]+]], v[[[A_LO]]:[[A_HI]]]
-; GCN-DAG: v_xor_b32_e32 v[[NEG_A_HI:[0-9]+]], 0x80000000, v[[A_HI]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v[[[A_LO]]:[[NEG_A_HI]]]
define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_store_use_fneg_f64_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v4, s1
+; SI-NEXT: v_add_i32_e32 v3, vcc, s0, v0
+; SI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; SI-NEXT: v_cvt_f32_f64_e32 v5, v[1:2]
+; SI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT: flat_store_dword v[3:4], v5
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dwordx2 v[0:1], v[1:2]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_store_use_fneg_f64_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v4, s1
+; VI-NEXT: v_add_u32_e32 v3, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; VI-NEXT: v_cvt_f32_f64_e32 v5, v[1:2]
+; VI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT: flat_store_dword v[3:4], v5
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[1:2]
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1669,14 +5143,52 @@ define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f64_to_f32(ptr addrspa
ret void
}
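; Note how the f64 fneg for the stored value costs a single v_xor_b32 on the
; high dword only (v2 here): flipping bit 63 of the double is enough, and the
; low 32 bits are reused unchanged in the flat_store_dwordx2.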
-; GCN-LABEL: {{^}}v_fneg_fp_round_multi_use_fneg_f64_to_f32:
-; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
-; GCN-DAG: v_cvt_f32_f64_e32 [[RESULT:v[0-9]+]], [[A]]
-; GCN-DAG: v_mul_f64 [[USE1:v\[[0-9]+:[0-9]+\]]], -[[A]], s[
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[USE1]]
define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, double %c) #0 {
+; SI-LABEL: v_fneg_fp_round_multi_use_fneg_f64_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v4, s1
+; SI-NEXT: v_add_i32_e32 v3, vcc, s0, v0
+; SI-NEXT: v_addc_u32_e32 v4, vcc, 0, v4, vcc
+; SI-NEXT: v_cvt_f32_f64_e32 v5, v[1:2]
+; SI-NEXT: v_mul_f64 v[0:1], -v[1:2], s[4:5]
+; SI-NEXT: flat_store_dword v[3:4], v5
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dwordx2 v[0:1], v[0:1]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_multi_use_fneg_f64_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v5, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_mul_f64 v[3:4], -v[1:2], s[4:5]
+; VI-NEXT: v_cvt_f32_f64_e32 v2, v[1:2]
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dwordx2 v[0:1], v[3:4]
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1691,11 +5203,42 @@ define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f64_to_f32(ptr addrspa
ret void
}
-; GCN-LABEL: {{^}}v_fneg_fp_round_f32_to_f16:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_cvt_f16_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
-; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_fp_round_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_f32_to_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v1, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_cvt_f16_f32_e64 v3, -v1
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT: flat_store_short v[0:1], v3
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_f32_to_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v1, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_cvt_f16_f32_e64 v3, -v1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT: flat_store_short v[0:1], v3
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1707,11 +5250,42 @@ define amdgpu_kernel void @v_fneg_fp_round_f32_to_f16(ptr addrspace(1) %out, ptr
ret void
}
-; GCN-LABEL: {{^}}v_fneg_fp_round_fneg_f32_to_f16:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[A]]
-; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_fp_round_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_fneg_f32_to_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v1, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v1
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; SI-NEXT: flat_store_short v[0:1], v3
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_fneg_f32_to_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v1, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_cvt_f16_f32_e32 v3, v1
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v2, vcc
+; VI-NEXT: flat_store_short v[0:1], v3
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1724,13 +5298,50 @@ define amdgpu_kernel void @v_fneg_fp_round_fneg_f32_to_f16(ptr addrspace(1) %out
ret void
}
-; GCN-LABEL: {{^}}v_fneg_multi_use_fp_round_fneg_f64_to_f32:
-; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
-; GCN-DAG: v_cvt_f32_f64_e32 [[CVT:v[0-9]+]], [[A]]
-; GCN-DAG: v_xor_b32_e32 [[NEG:v[0-9]+]], 0x80000000, [[CVT]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[CVT]]
define amdgpu_kernel void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_multi_use_fp_round_fneg_f64_to_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_cvt_f32_f64_e32 v2, v[1:2]
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; SI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_multi_use_fp_round_fneg_f64_to_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dwordx2 v[1:2], v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_cvt_f32_f64_e32 v2, v[1:2]
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v3, vcc
+; VI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1743,13 +5354,50 @@ define amdgpu_kernel void @v_fneg_multi_use_fp_round_fneg_f64_to_f32(ptr addrspa
ret void
}
-; GCN-LABEL: {{^}}v_fneg_fp_round_store_use_fneg_f32_to_f16:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[A]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_fp_round_store_use_fneg_f32_to_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v2, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v2
+; SI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-NEXT: flat_store_short v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_store_use_fneg_f32_to_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v2, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_cvt_f16_f32_e32 v3, v2
+; VI-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-NEXT: flat_store_short v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1763,13 +5411,52 @@ define amdgpu_kernel void @v_fneg_fp_round_store_use_fneg_f32_to_f16(ptr addrspa
ret void
}
-; GCN-LABEL: {{^}}v_fneg_fp_round_multi_use_fneg_f32_to_f16:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: v_cvt_f16_f32_e32 [[RESULT:v[0-9]+]], [[A]]
-; GCN-DAG: v_mul_f32_e64 [[USE1:v[0-9]+]], -[[A]], s
-; GCN: flat_store_short v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[USE1]]
define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_fp_round_multi_use_fneg_f32_to_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s4, s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; SI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v1
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v2, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v2
+; SI-NEXT: v_mul_f32_e64 v2, -v2, s4
+; SI-NEXT: flat_store_short v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_fp_round_multi_use_fneg_f32_to_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v1
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v2, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_cvt_f16_f32_e32 v3, v2
+; VI-NEXT: v_mul_f32_e64 v2, -v2, s4
+; VI-NEXT: flat_store_short v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1788,11 +5475,40 @@ define amdgpu_kernel void @v_fneg_fp_round_multi_use_fneg_f32_to_f16(ptr addrspa
; rcp tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_rcp_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_rcp_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_rcp_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rcp_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_rcp_f32_e64 v3, -v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_rcp_f32_e64 v3, -v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1804,11 +5520,40 @@ define amdgpu_kernel void @v_fneg_rcp_f32(ptr addrspace(1) %out, ptr addrspace(1
ret void
}
-; GCN-LABEL: {{^}}v_fneg_rcp_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_rcp_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rcp_fneg_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_rcp_f32_e32 v3, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_fneg_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_rcp_f32_e32 v3, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1821,13 +5566,48 @@ define amdgpu_kernel void @v_fneg_rcp_fneg_f32(ptr addrspace(1) %out, ptr addrsp
ret void
}
-; GCN-LABEL: {{^}}v_fneg_rcp_store_use_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
define amdgpu_kernel void @v_fneg_rcp_store_use_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rcp_store_use_fneg_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_rcp_f32_e32 v4, v3
+; SI-NEXT: v_xor_b32_e32 v2, 0x80000000, v3
+; SI-NEXT: flat_store_dword v[0:1], v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_store_use_fneg_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_rcp_f32_e32 v4, v3
+; VI-NEXT: v_xor_b32_e32 v2, 0x80000000, v3
+; VI-NEXT: flat_store_dword v[0:1], v4
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1841,13 +5621,50 @@ define amdgpu_kernel void @v_fneg_rcp_store_use_fneg_f32(ptr addrspace(1) %out,
ret void
}
-; GCN-LABEL: {{^}}v_fneg_rcp_multi_use_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN-DAG: v_rcp_f32_e32 [[RESULT:v[0-9]+]], [[A]]
-; GCN-DAG: v_mul_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
define amdgpu_kernel void @v_fneg_rcp_multi_use_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_rcp_multi_use_fneg_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dword s4, s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_rcp_f32_e32 v4, v3
+; SI-NEXT: v_mul_f32_e64 v2, -v3, s4
+; SI-NEXT: flat_store_dword v[0:1], v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_rcp_multi_use_fneg_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dword s4, s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_rcp_f32_e32 v4, v3
+; VI-NEXT: v_mul_f32_e64 v2, -v3, s4
+; VI-NEXT: flat_store_dword v[0:1], v4
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1866,12 +5683,52 @@ define amdgpu_kernel void @v_fneg_rcp_multi_use_fneg_f32(ptr addrspace(1) %out,
; fmul_legacy tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_mul_legacy_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_legacy_f32_e64 [[RESULT:v[0-9]+]], [[A]], -[[B]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v5, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_legacy_f32_e64 v2, v5, -v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v5, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_legacy_f32_e64 v2, v5, -v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1885,16 +5742,58 @@ define amdgpu_kernel void @v_fneg_mul_legacy_f32(ptr addrspace(1) %out, ptr addr
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_legacy_store_use_mul_legacy_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], 0x80000000, [[ADD]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL_LEGACY]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_store_use_mul_legacy_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_mul_legacy_f32_e32 v2, v4, v2
+; SI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_store_use_mul_legacy_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_mul_legacy_f32_e32 v2, v4, v2
+; VI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1909,16 +5808,58 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_mul_legacy_f32(ptr addrsp
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_legacy_multi_use_mul_legacy_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_legacy_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
-; GCN-NEXT: v_mul_legacy_f32_e64 [[MUL:v[0-9]+]], -[[ADD]], 4.0
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_multi_use_mul_legacy_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_mul_legacy_f32_e64 v2, v4, -v2
+; SI-NEXT: v_mul_legacy_f32_e64 v3, -v2, 4.0
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_multi_use_mul_legacy_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_mul_legacy_f32_e64 v2, v4, -v2
+; VI-NEXT: v_mul_legacy_f32_e64 v3, -v2, 4.0
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1934,12 +5875,52 @@ define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_mul_legacy_f32(ptr addrsp
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_legacy_fneg_x_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_fneg_x_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_legacy_f32_e32 v2, v0, v1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_fneg_x_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mul_legacy_f32_e32 v2, v0, v1
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1954,12 +5935,52 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_x_f32(ptr addrspace(1) %out, p
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_legacy_x_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_legacy_f32_e32 [[ADD:v[0-9]+]], [[A]], [[B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_x_fneg_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_legacy_f32_e32 v2, v0, v1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_x_fneg_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mul_legacy_f32_e32 v2, v0, v1
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1974,12 +5995,52 @@ define amdgpu_kernel void @v_fneg_mul_legacy_x_fneg_f32(ptr addrspace(1) %out, p
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_legacy_fneg_fneg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_legacy_f32_e64 [[ADD:v[0-9]+]], [[A]], -[[B]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[ADD]]
define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_fneg_fneg_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v1, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_legacy_f32_e64 v2, v0, -v1
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_fneg_fneg_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v1, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mul_legacy_f32_e64 v2, v0, -v1
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -1995,14 +6056,58 @@ define amdgpu_kernel void @v_fneg_mul_legacy_fneg_fneg_f32(ptr addrspace(1) %out
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_legacy_store_use_fneg_x_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_xor_b32_e32 [[NEG_A:v[0-9]+]], 0x80000000, [[A]]
-; GCN-DAG: v_mul_legacy_f32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], [[A]], [[B]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL_LEGACY]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_A]]
define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_mul_legacy_store_use_fneg_x_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_xor_b32_e32 v3, 0x80000000, v4
+; SI-NEXT: v_mul_legacy_f32_e32 v2, v4, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_store_use_fneg_x_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_xor_b32_e32 v3, 0x80000000, v4
+; VI-NEXT: v_mul_legacy_f32_e32 v2, v4, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2018,14 +6123,62 @@ define amdgpu_kernel void @v_fneg_mul_legacy_store_use_fneg_x_f32(ptr addrspace(
ret void
}
-; GCN-LABEL: {{^}}v_fneg_mul_legacy_multi_use_fneg_x_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN-DAG: v_mul_legacy_f32_e32 [[NEG_MUL_LEGACY:v[0-9]+]], [[A]], [[B]]
-; GCN-DAG: v_mul_legacy_f32_e64 [[MUL:v[0-9]+]], -[[A]], s{{[0-9]+}}
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[NEG_MUL_LEGACY]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_fneg_x_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, float %c) #0 {
+; SI-LABEL: v_fneg_mul_legacy_multi_use_fneg_x_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s7
+; SI-NEXT: v_add_i32_e32 v2, vcc, s6, v2
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v4, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_load_dword s2, s[4:5], 0xf
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mul_legacy_f32_e64 v3, -v4, s2
+; SI-NEXT: v_mul_legacy_f32_e32 v2, v4, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_mul_legacy_multi_use_fneg_x_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s7
+; VI-NEXT: v_add_u32_e32 v2, vcc, s6, v2
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v4, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_load_dword s2, s[4:5], 0x3c
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mul_legacy_f32_e64 v3, -v4, s2
+; VI-NEXT: v_mul_legacy_f32_e32 v2, v4, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2046,13 +6199,44 @@ define amdgpu_kernel void @v_fneg_mul_legacy_multi_use_fneg_x_f32(ptr addrspace(
; sin tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_sin_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], 0xbe22f983, [[A]]
-; GCN: v_fract_f32_e32 [[FRACT:v[0-9]+]], [[MUL]]
-; GCN: v_sin_f32_e32 [[RESULT:v[0-9]+]], [[FRACT]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_sin_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_sin_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_mul_f32_e32 v0, 0xbe22f983, v0
+; SI-NEXT: v_fract_f32_e32 v0, v0
+; SI-NEXT: v_sin_f32_e32 v3, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_sin_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_mul_f32_e32 v0, 0xbe22f983, v0
+; VI-NEXT: v_fract_f32_e32 v0, v0
+; VI-NEXT: v_sin_f32_e32 v3, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2064,11 +6248,40 @@ define amdgpu_kernel void @v_fneg_sin_f32(ptr addrspace(1) %out, ptr addrspace(1
ret void
}
-; GCN-LABEL: {{^}}v_fneg_amdgcn_sin_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_sin_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_amdgcn_sin_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_sin_f32_e64 v3, -v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_amdgcn_sin_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_sin_f32_e64 v3, -v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2084,11 +6297,40 @@ define amdgpu_kernel void @v_fneg_amdgcn_sin_f32(ptr addrspace(1) %out, ptr addr
; ftrunc tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_trunc_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_trunc_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_trunc_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_trunc_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_trunc_f32_e64 v2, -v3
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_trunc_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_trunc_f32_e64 v2, -v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2104,18 +6346,100 @@ define amdgpu_kernel void @v_fneg_trunc_f32(ptr addrspace(1) %out, ptr addrspace
; fround tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_round_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_trunc_f32_e32
-; GCN: v_sub_f32_e32
-; GCN: v_cndmask_b32
-
-; GCN-SAFE: v_add_f32_e32 [[ADD:v[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
-; GCN-SAFE: v_xor_b32_e32 [[RESULT:v[0-9]+]], 0x80000000, [[ADD]]
-
-; GCN-NSZ: v_sub_f32_e64 [[RESULT:v[0-9]+]], -v{{[0-9]+}}, v{{[0-9]+}}
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_round_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-SAFE-LABEL: v_fneg_round_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_trunc_f32_e32 v2, v3
+; SI-SAFE-NEXT: v_sub_f32_e32 v4, v3, v2
+; SI-SAFE-NEXT: v_cmp_ge_f32_e64 s[0:1], |v4|, 0.5
+; SI-SAFE-NEXT: v_cndmask_b32_e64 v4, 0, 1.0, s[0:1]
+; SI-SAFE-NEXT: s_brev_b32 s0, -2
+; SI-SAFE-NEXT: v_bfi_b32 v3, s0, v4, v3
+; SI-SAFE-NEXT: v_add_f32_e32 v2, v2, v3
+; SI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: v_fneg_round_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_trunc_f32_e32 v2, v3
+; SI-NSZ-NEXT: v_sub_f32_e32 v4, v3, v2
+; SI-NSZ-NEXT: v_cmp_ge_f32_e64 s[0:1], |v4|, 0.5
+; SI-NSZ-NEXT: v_cndmask_b32_e64 v4, 0, 1.0, s[0:1]
+; SI-NSZ-NEXT: s_brev_b32 s0, -2
+; SI-NSZ-NEXT: v_bfi_b32 v3, s0, v4, v3
+; SI-NSZ-NEXT: v_sub_f32_e64 v2, -v2, v3
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: v_fneg_round_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s3
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s1
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_trunc_f32_e32 v2, v3
+; VI-SAFE-NEXT: v_sub_f32_e32 v4, v3, v2
+; VI-SAFE-NEXT: v_cmp_ge_f32_e64 s[0:1], |v4|, 0.5
+; VI-SAFE-NEXT: v_cndmask_b32_e64 v4, 0, 1.0, s[0:1]
+; VI-SAFE-NEXT: s_brev_b32 s0, -2
+; VI-SAFE-NEXT: v_bfi_b32 v3, s0, v4, v3
+; VI-SAFE-NEXT: v_add_f32_e32 v2, v2, v3
+; VI-SAFE-NEXT: v_xor_b32_e32 v2, 0x80000000, v2
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: v_fneg_round_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s3
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s1
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_trunc_f32_e32 v2, v3
+; VI-NSZ-NEXT: v_sub_f32_e32 v4, v3, v2
+; VI-NSZ-NEXT: v_cmp_ge_f32_e64 s[0:1], |v4|, 0.5
+; VI-NSZ-NEXT: v_cndmask_b32_e64 v4, 0, 1.0, s[0:1]
+; VI-NSZ-NEXT: s_brev_b32 s0, -2
+; VI-NSZ-NEXT: v_bfi_b32 v3, s0, v4, v3
+; VI-NSZ-NEXT: v_sub_f32_e64 v2, -v2, v3
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2131,11 +6455,40 @@ define amdgpu_kernel void @v_fneg_round_f32(ptr addrspace(1) %out, ptr addrspace
; rint tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_rint_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_rint_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_rint_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_rndne_f32_e64 v2, -v3
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_rint_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_rndne_f32_e64 v2, -v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2151,11 +6504,40 @@ define amdgpu_kernel void @v_fneg_rint_f32(ptr addrspace(1) %out, ptr addrspace(
; nearbyint tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_nearbyint_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_rndne_f32_e64 [[RESULT:v[0-9]+]], -[[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_nearbyint_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_nearbyint_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_rndne_f32_e64 v2, -v3
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_nearbyint_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_rndne_f32_e64 v2, -v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2171,11 +6553,40 @@ define amdgpu_kernel void @v_fneg_nearbyint_f32(ptr addrspace(1) %out, ptr addrs
; fcanonicalize tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_canonicalize_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[RESULT:v[0-9]+]], -1.0, [[A]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[RESULT]]
define amdgpu_kernel void @v_fneg_canonicalize_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) #0 {
+; SI-LABEL: v_fneg_canonicalize_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: flat_load_dword v3, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v2
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_canonicalize_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: flat_load_dword v3, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v2
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, -1.0, v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2191,13 +6602,64 @@ define amdgpu_kernel void @v_fneg_canonicalize_f32(ptr addrspace(1) %out, ptr ad
; vintrp tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_interp_p1_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[A]], -[[B]]
-; GCN: v_interp_p1_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
-; GCN: v_interp_p1_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
define amdgpu_kernel void @v_fneg_interp_p1_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_interp_p1_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v5, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e64 v2, v5, -v2
+; SI-NEXT: v_interp_p1_f32 v3, v2, attr0.x
+; SI-NEXT: v_interp_p1_f32 v2, v2, attr0.y
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_interp_p1_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_mov_b32 m0, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v5, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e64 v2, v5, -v2
+; VI-NEXT: v_interp_p1_f32_e32 v3, v2, attr0.x
+; VI-NEXT: v_interp_p1_f32_e32 v2, v2, attr0.y
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2214,13 +6676,68 @@ define amdgpu_kernel void @v_fneg_interp_p1_f32(ptr addrspace(1) %out, ptr addrs
ret void
}
-; GCN-LABEL: {{^}}v_fneg_interp_p2_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[A]], -[[B]]
-; GCN: v_interp_p2_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
-; GCN: v_interp_p2_f32{{(_e32)?}} v{{[0-9]+}}, [[MUL]]
define amdgpu_kernel void @v_fneg_interp_p2_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
+; SI-LABEL: v_fneg_interp_p2_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0xd
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: v_mov_b32_e32 v6, 4.0
+; SI-NEXT: s_mov_b32 m0, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: flat_load_dword v5, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT: v_mov_b32_e32 v3, 4.0
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e64 v2, v5, -v2
+; SI-NEXT: v_interp_p2_f32 v6, v2, attr0.x
+; SI-NEXT: v_interp_p2_f32 v3, v2, attr0.y
+; SI-NEXT: flat_store_dword v[0:1], v6
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_interp_p2_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x34
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: v_mov_b32_e32 v6, 4.0
+; VI-NEXT: s_mov_b32 m0, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dword v5, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT: v_mov_b32_e32 v3, 4.0
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e64 v2, v5, -v2
+; VI-NEXT: v_interp_p2_f32_e32 v6, v2, attr0.x
+; VI-NEXT: v_interp_p2_f32_e32 v3, v2, attr0.y
+; VI-NEXT: flat_store_dword v[0:1], v6
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2241,21 +6758,86 @@ define amdgpu_kernel void @v_fneg_interp_p2_f32(ptr addrspace(1) %out, ptr addrs
; CopyToReg tests
; --------------------------------------------------------------------------------
-; GCN-LABEL: {{^}}v_fneg_copytoreg_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[MUL0:v[0-9]+]], [[A]], [[B]]
-; GCN: s_cbranch_scc0
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL0]]
-; GCN: s_endpgm
-
-; GCN: v_xor_b32_e32 [[XOR:v[0-9]+]], 0x80000000, [[MUL0]]
-; GCN: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[XOR]], [[C]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
-
define amdgpu_kernel void @v_fneg_copytoreg_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
+; SI-LABEL: v_fneg_copytoreg_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT: s_load_dword s0, s[4:5], 0x11
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s11
+; SI-NEXT: v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s13
+; SI-NEXT: v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s15
+; SI-NEXT: v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: flat_load_dword v7, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v3, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[4:5] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s9
+; SI-NEXT: v_add_i32_e32 v0, vcc, s8, v6
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: s_cmp_lg_u32 s0, 0
+; SI-NEXT: v_mul_f32_e32 v3, v7, v3
+; SI-NEXT: s_cbranch_scc0 .LBB105_2
+; SI-NEXT: ; %bb.1: ; %endif
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+; SI-NEXT: .LBB105_2: ; %if
+; SI-NEXT: v_xor_b32_e32 v4, 0x80000000, v3
+; SI-NEXT: v_mul_f32_e32 v2, v4, v2
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_copytoreg_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; VI-NEXT: s_load_dword s0, s[4:5], 0x44
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s11
+; VI-NEXT: v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v5, s15
+; VI-NEXT: v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT: flat_load_dword v7, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v3, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[4:5] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s9
+; VI-NEXT: v_add_u32_e32 v0, vcc, s8, v6
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: s_cmp_lg_u32 s0, 0
+; VI-NEXT: v_mul_f32_e32 v3, v7, v3
+; VI-NEXT: s_cbranch_scc0 .LBB105_2
+; VI-NEXT: ; %bb.1: ; %endif
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
+; VI-NEXT: .LBB105_2: ; %if
+; VI-NEXT: v_xor_b32_e32 v4, 0x80000000, v3
+; VI-NEXT: v_mul_f32_e32 v2, v4, v2
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2285,13 +6867,68 @@ endif:
; --------------------------------------------------------------------------------
; Can't fold into use, so should fold into source
-; GCN-LABEL: {{^}}v_fneg_inlineasm_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e64 [[MUL:v[0-9]+]], [[A]], -[[B]]
-; GCN: ; use [[MUL]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
define amdgpu_kernel void @v_fneg_inlineasm_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
+; SI-LABEL: v_fneg_inlineasm_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s7
+; SI-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e64 v2, v6, -v2
+; SI-NEXT: ;;#ASMSTART
+; SI-NEXT: ; use v2
+; SI-NEXT: ;;#ASMEND
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_inlineasm_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v5, s7
+; VI-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e64 v2, v6, -v2
+; VI-NEXT: ;;#ASMSTART
+; VI-NEXT: ; use v2
+; VI-NEXT: ;;#ASMEND
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2313,14 +6950,70 @@ define amdgpu_kernel void @v_fneg_inlineasm_f32(ptr addrspace(1) %out, ptr addrs
; --------------------------------------------------------------------------------
; Can't fold into use, so should fold into source
-; GCN-LABEL: {{^}}v_fneg_inlineasm_multi_use_src_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: v_mul_f32_e32 [[MUL:v[0-9]+]], [[A]], [[B]]
-; GCN: v_xor_b32_e32 [[NEG:v[0-9]+]], 0x80000000, [[MUL]]
-; GCN: ; use [[NEG]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL]]
define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, i32 %d) #0 {
+; SI-LABEL: v_fneg_inlineasm_multi_use_src_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s7
+; SI-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mul_f32_e32 v2, v6, v2
+; SI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; SI-NEXT: ;;#ASMSTART
+; SI-NEXT: ; use v3
+; SI-NEXT: ;;#ASMEND
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_inlineasm_multi_use_src_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v5, s7
+; VI-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v5, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mul_f32_e32 v2, v6, v2
+; VI-NEXT: v_xor_b32_e32 v3, 0x80000000, v2
+; VI-NEXT: ;;#ASMSTART
+; VI-NEXT: ; use v3
+; VI-NEXT: ;;#ASMEND
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2343,19 +7036,66 @@ define amdgpu_kernel void @v_fneg_inlineasm_multi_use_src_f32(ptr addrspace(1) %
; There are multiple users of the fneg that must use a VOP3
; instruction, so there is no penalty
-; GCN-LABEL: {{^}}multiuse_fneg_2_vop3_users_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN: v_fma_f32 [[FMA0:v[0-9]+]], -[[A]], [[B]], [[C]]
-; GCN-NEXT: v_fma_f32 [[FMA1:v[0-9]+]], -[[A]], [[C]], 2.0
-
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA1]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: multiuse_fneg_2_vop3_users_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s7
+; SI-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_fma_f32 v2, -v6, v2, v3
+; SI-NEXT: v_fma_f32 v3, -v6, v3, 2.0
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: multiuse_fneg_2_vop3_users_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v5, s7
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_fma_f32 v2, -v6, v2, v3
+; VI-NEXT: v_fma_f32 v3, -v6, v3, 2.0
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2377,19 +7117,66 @@ define amdgpu_kernel void @multiuse_fneg_2_vop3_users_f32(ptr addrspace(1) %out,
; There are multiple users, but both require using a larger encoding
; for the modifier.
-
-; GCN-LABEL: {{^}}multiuse_fneg_2_vop2_users_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN: v_mul_f32_e64 [[MUL0:v[0-9]+]], -[[A]], [[B]]
-; GCN: v_mul_f32_e64 [[MUL1:v[0-9]+]], -[[A]], [[C]]
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL0]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: multiuse_fneg_2_vop2_users_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s7
+; SI-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_mul_f32_e64 v2, -v6, v2
+; SI-NEXT: v_mul_f32_e64 v3, -v6, v3
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: multiuse_fneg_2_vop2_users_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v5, s7
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_mul_f32_e64 v2, -v6, v2
+; VI-NEXT: v_mul_f32_e64 v3, -v6, v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2410,19 +7197,66 @@ define amdgpu_kernel void @multiuse_fneg_2_vop2_users_f32(ptr addrspace(1) %out,
}
; One user is VOP3 so has no cost to folding the modifier, the other does.
-; GCN-LABEL: {{^}}multiuse_fneg_vop2_vop3_users_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-
-; GCN: v_fma_f32 [[FMA0:v[0-9]+]], -[[A]], [[B]], 2.0
-; GCN: v_mul_f32_e64 [[MUL1:v[0-9]+]], -[[A]], [[C]]
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @multiuse_fneg_vop2_vop3_users_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr) #0 {
+; SI-LABEL: multiuse_fneg_vop2_vop3_users_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_add_i32_e32 v0, vcc, s2, v4
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_add_i32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s7
+; SI-NEXT: v_add_i32_e32 v4, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: flat_load_dword v6, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: v_mov_b32_e32 v1, s1
+; SI-NEXT: v_fma_f32 v2, -v6, v2, 2.0
+; SI-NEXT: v_mul_f32_e64 v3, -v6, v3
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: multiuse_fneg_vop2_vop3_users_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v4, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v5, s7
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT: flat_load_dword v6, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: v_mov_b32_e32 v1, s1
+; VI-NEXT: v_fma_f32 v2, -v6, v2, 2.0
+; VI-NEXT: v_mul_f32_e64 v3, -v6, v3
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2444,26 +7278,154 @@ define amdgpu_kernel void @multiuse_fneg_vop2_vop3_users_f32(ptr addrspace(1) %o
; The use of the fneg requires a code size increase, but folding into
; the source does not
-
-; GCN-LABEL: {{^}}free_fold_src_code_size_cost_use_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[D:v[0-9]+]]
-
-; GCN-SAFE: v_fma_f32 [[FMA0:v[0-9]+]], [[A]], [[B]], 2.0
-; GCN-SAFE-DAG: v_mul_f32_e64 [[MUL1:v[0-9]+]], -[[FMA0]], [[C]]
-; GCN-SAFE-DAG: v_mul_f32_e64 [[MUL2:v[0-9]+]], -[[FMA0]], [[D]]
-
-; GCN-NSZ: v_fma_f32 [[FMA0:v[0-9]+]], [[A]], -[[B]], -2.0
-; GCN-NSZ-DAG: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[FMA0]], [[C]]
-; GCN-NSZ-DAG: v_mul_f32_e32 [[MUL2:v[0-9]+]], [[FMA0]], [[D]]
-
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL2]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @free_fold_src_code_size_cost_use_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-SAFE-LABEL: free_fold_src_code_size_cost_use_f32:
+; SI-SAFE: ; %bb.0:
+; SI-SAFE-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-SAFE-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-SAFE-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s11
+; SI-SAFE-NEXT: v_add_i32_e32 v0, vcc, s10, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v3, s13
+; SI-SAFE-NEXT: v_add_i32_e32 v2, vcc, s12, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v5, s15
+; SI-SAFE-NEXT: v_add_i32_e32 v4, vcc, s14, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-SAFE-NEXT: v_mov_b32_e32 v7, s1
+; SI-SAFE-NEXT: v_add_i32_e32 v6, vcc, s0, v6
+; SI-SAFE-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-SAFE-NEXT: flat_load_dword v8, v[0:1] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_load_dword v4, v[6:7] glc
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: v_mov_b32_e32 v0, s8
+; SI-SAFE-NEXT: v_mov_b32_e32 v1, s9
+; SI-SAFE-NEXT: v_fma_f32 v2, v8, v2, 2.0
+; SI-SAFE-NEXT: v_mul_f32_e64 v3, -v2, v3
+; SI-SAFE-NEXT: v_mul_f32_e64 v2, -v2, v4
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; SI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; SI-SAFE-NEXT: s_endpgm
+;
+; SI-NSZ-LABEL: free_fold_src_code_size_cost_use_f32:
+; SI-NSZ: ; %bb.0:
+; SI-NSZ-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NSZ-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NSZ-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s11
+; SI-NSZ-NEXT: v_add_i32_e32 v0, vcc, s10, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v3, s13
+; SI-NSZ-NEXT: v_add_i32_e32 v2, vcc, s12, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v5, s15
+; SI-NSZ-NEXT: v_add_i32_e32 v4, vcc, s14, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NSZ-NEXT: v_mov_b32_e32 v7, s1
+; SI-NSZ-NEXT: v_add_i32_e32 v6, vcc, s0, v6
+; SI-NSZ-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-NSZ-NEXT: flat_load_dword v8, v[0:1] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_load_dword v4, v[6:7] glc
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: v_mov_b32_e32 v0, s8
+; SI-NSZ-NEXT: v_mov_b32_e32 v1, s9
+; SI-NSZ-NEXT: v_fma_f32 v2, v8, -v2, -2.0
+; SI-NSZ-NEXT: v_mul_f32_e32 v3, v2, v3
+; SI-NSZ-NEXT: v_mul_f32_e32 v2, v2, v4
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; SI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; SI-NSZ-NEXT: s_endpgm
+;
+; VI-SAFE-LABEL: free_fold_src_code_size_cost_use_f32:
+; VI-SAFE: ; %bb.0:
+; VI-SAFE-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-SAFE-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-SAFE-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; VI-SAFE-NEXT: s_waitcnt lgkmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s11
+; VI-SAFE-NEXT: v_add_u32_e32 v0, vcc, s10, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v3, s13
+; VI-SAFE-NEXT: v_add_u32_e32 v2, vcc, s12, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v5, s15
+; VI-SAFE-NEXT: v_add_u32_e32 v4, vcc, s14, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-SAFE-NEXT: v_mov_b32_e32 v7, s1
+; VI-SAFE-NEXT: v_add_u32_e32 v6, vcc, s0, v6
+; VI-SAFE-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; VI-SAFE-NEXT: flat_load_dword v8, v[0:1] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_load_dword v4, v[6:7] glc
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: v_mov_b32_e32 v0, s8
+; VI-SAFE-NEXT: v_mov_b32_e32 v1, s9
+; VI-SAFE-NEXT: v_fma_f32 v2, v8, v2, 2.0
+; VI-SAFE-NEXT: v_mul_f32_e64 v3, -v2, v3
+; VI-SAFE-NEXT: v_mul_f32_e64 v2, -v2, v4
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v3
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: flat_store_dword v[0:1], v2
+; VI-SAFE-NEXT: s_waitcnt vmcnt(0)
+; VI-SAFE-NEXT: s_endpgm
+;
+; VI-NSZ-LABEL: free_fold_src_code_size_cost_use_f32:
+; VI-NSZ: ; %bb.0:
+; VI-NSZ-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NSZ-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NSZ-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; VI-NSZ-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s11
+; VI-NSZ-NEXT: v_add_u32_e32 v0, vcc, s10, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v3, s13
+; VI-NSZ-NEXT: v_add_u32_e32 v2, vcc, s12, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v5, s15
+; VI-NSZ-NEXT: v_add_u32_e32 v4, vcc, s14, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NSZ-NEXT: v_mov_b32_e32 v7, s1
+; VI-NSZ-NEXT: v_add_u32_e32 v6, vcc, s0, v6
+; VI-NSZ-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; VI-NSZ-NEXT: flat_load_dword v8, v[0:1] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_load_dword v4, v[6:7] glc
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: v_mov_b32_e32 v0, s8
+; VI-NSZ-NEXT: v_mov_b32_e32 v1, s9
+; VI-NSZ-NEXT: v_fma_f32 v2, v8, -v2, -2.0
+; VI-NSZ-NEXT: v_mul_f32_e32 v3, v2, v3
+; VI-NSZ-NEXT: v_mul_f32_e32 v2, v2, v4
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v3
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: flat_store_dword v[0:1], v2
+; VI-NSZ-NEXT: s_waitcnt vmcnt(0)
+; VI-NSZ-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2486,21 +7448,80 @@ define amdgpu_kernel void @free_fold_src_code_size_cost_use_f32(ptr addrspace(1)
ret void
}
-; GCN-LABEL: {{^}}free_fold_src_code_size_cost_use_f64:
-; GCN: {{buffer|flat}}_load_dwordx2 [[A:v\[[0-9]+:[0-9]+\]]]
-; GCN: {{buffer|flat}}_load_dwordx2 [[B:v\[[0-9]+:[0-9]+\]]]
-; GCN: {{buffer|flat}}_load_dwordx2 [[C:v\[[0-9]+:[0-9]+\]]]
-; GCN: {{buffer|flat}}_load_dwordx2 [[D:v\[[0-9]+:[0-9]+\]]]
-
-; GCN: v_fma_f64 [[FMA0:v\[[0-9]+:[0-9]+\]]], [[A]], [[B]], 2.0
-; GCN-DAG: v_mul_f64 [[MUL0:v\[[0-9]+:[0-9]+\]]], -[[FMA0]], [[C]]
-; GCN-DAG: v_mul_f64 [[MUL1:v\[[0-9]+:[0-9]+\]]], -[[FMA0]], [[D]]
-
-; GCN: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[MUL0]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
-; GCN-NEXT: flat_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
-; GCN-NEXT: s_waitcnt vmcnt(0)
define amdgpu_kernel void @free_fold_src_code_size_cost_use_f64(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-LABEL: free_fold_src_code_size_cost_use_f64:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NEXT: v_lshlrev_b32_e32 v6, 3, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s11
+; SI-NEXT: v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s13
+; SI-NEXT: v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s15
+; SI-NEXT: v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dwordx2 v[2:3], v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v7, s1
+; SI-NEXT: v_add_i32_e32 v6, vcc, s0, v6
+; SI-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-NEXT: flat_load_dwordx2 v[4:5], v[4:5] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dwordx2 v[6:7], v[6:7] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], 2.0
+; SI-NEXT: v_mov_b32_e32 v2, s8
+; SI-NEXT: v_mov_b32_e32 v3, s9
+; SI-NEXT: v_mul_f64 v[4:5], -v[0:1], v[4:5]
+; SI-NEXT: v_mul_f64 v[0:1], -v[0:1], v[6:7]
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[4:5]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: free_fold_src_code_size_cost_use_f64:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NEXT: v_lshlrev_b32_e32 v6, 3, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s11
+; VI-NEXT: v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v5, s15
+; VI-NEXT: v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT: v_mov_b32_e32 v7, s1
+; VI-NEXT: v_add_u32_e32 v6, vcc, s0, v6
+; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; VI-NEXT: flat_load_dwordx2 v[4:5], v[4:5] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dwordx2 v[6:7], v[6:7] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_fma_f64 v[0:1], v[0:1], v[2:3], 2.0
+; VI-NEXT: v_mul_f64 v[2:3], -v[0:1], v[4:5]
+; VI-NEXT: v_mul_f64 v[0:1], -v[0:1], v[6:7]
+; VI-NEXT: v_mov_b32_e32 v4, s8
+; VI-NEXT: v_mov_b32_e32 v5, s9
+; VI-NEXT: flat_store_dwordx2 v[4:5], v[2:3]
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds double, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2525,15 +7546,74 @@ define amdgpu_kernel void @free_fold_src_code_size_cost_use_f64(ptr addrspace(1)
; %trunc.a has one fneg use, but it requires a code size increase and
; the fneg can instead be folded for free into the fma.
-
-; GCN-LABEL: {{^}}one_use_cost_to_fold_into_src_f32:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN: v_trunc_f32_e32 [[TRUNC_A:v[0-9]+]], [[A]]
-; GCN: v_fma_f32 [[FMA0:v[0-9]+]], -[[TRUNC_A]], [[B]], [[C]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-LABEL: one_use_cost_to_fold_into_src_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s11
+; SI-NEXT: v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s13
+; SI-NEXT: v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s15
+; SI-NEXT: v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: flat_load_dword v8, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v7, s1
+; SI-NEXT: v_add_i32_e32 v0, vcc, s0, v6
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v7, vcc
+; SI-NEXT: flat_load_dword v0, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_trunc_f32_e32 v0, v8
+; SI-NEXT: v_fma_f32 v2, -v0, v2, v3
+; SI-NEXT: v_mov_b32_e32 v0, s8
+; SI-NEXT: v_mov_b32_e32 v1, s9
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: one_use_cost_to_fold_into_src_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s11
+; VI-NEXT: v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v5, s15
+; VI-NEXT: v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT: flat_load_dword v8, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v7, s1
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v6
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v7, vcc
+; VI-NEXT: flat_load_dword v0, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_trunc_f32_e32 v0, v8
+; VI-NEXT: v_fma_f32 v2, -v0, v2, v3
+; VI-NEXT: v_mov_b32_e32 v0, s8
+; VI-NEXT: v_mov_b32_e32 v1, s9
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2553,17 +7633,80 @@ define amdgpu_kernel void @one_use_cost_to_fold_into_src_f32(ptr addrspace(1) %o
ret void
}
-; GCN-LABEL: {{^}}multi_use_cost_to_fold_into_src:
-; GCN: {{buffer|flat}}_load_dword [[A:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[B:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[C:v[0-9]+]]
-; GCN: {{buffer|flat}}_load_dword [[D:v[0-9]+]]
-; GCN: v_trunc_f32_e32 [[TRUNC_A:v[0-9]+]], [[A]]
-; GCN-DAG: v_fma_f32 [[FMA0:v[0-9]+]], -[[TRUNC_A]], [[B]], [[C]]
-; GCN-DAG: v_mul_f32_e32 [[MUL1:v[0-9]+]], [[TRUNC_A]], [[D]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[FMA0]]
-; GCN: flat_store_dword v{{\[[0-9]+:[0-9]+\]}}, [[MUL1]]
define amdgpu_kernel void @multi_use_cost_to_fold_into_src(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr, ptr addrspace(1) %c.ptr, ptr addrspace(1) %d.ptr) #0 {
+; SI-LABEL: multi_use_cost_to_fold_into_src:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x9
+; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x11
+; SI-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s11
+; SI-NEXT: v_add_i32_e32 v0, vcc, s10, v6
+; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; SI-NEXT: v_mov_b32_e32 v3, s13
+; SI-NEXT: v_add_i32_e32 v2, vcc, s12, v6
+; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; SI-NEXT: v_mov_b32_e32 v5, s15
+; SI-NEXT: v_add_i32_e32 v4, vcc, s14, v6
+; SI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; SI-NEXT: v_mov_b32_e32 v7, s1
+; SI-NEXT: v_add_i32_e32 v6, vcc, s0, v6
+; SI-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; SI-NEXT: flat_load_dword v8, v[0:1] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v2, v[2:3] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v3, v[4:5] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_load_dword v4, v[6:7] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v0, s8
+; SI-NEXT: v_mov_b32_e32 v1, s9
+; SI-NEXT: v_trunc_f32_e32 v5, v8
+; SI-NEXT: v_fma_f32 v2, -v5, v2, v3
+; SI-NEXT: v_mul_f32_e32 v3, v5, v4
+; SI-NEXT: flat_store_dword v[0:1], v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: flat_store_dword v[0:1], v3
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: multi_use_cost_to_fold_into_src:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x44
+; VI-NEXT: v_lshlrev_b32_e32 v6, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s11
+; VI-NEXT: v_add_u32_e32 v0, vcc, s10, v6
+; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; VI-NEXT: v_mov_b32_e32 v3, s13
+; VI-NEXT: v_add_u32_e32 v2, vcc, s12, v6
+; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
+; VI-NEXT: v_mov_b32_e32 v5, s15
+; VI-NEXT: v_add_u32_e32 v4, vcc, s14, v6
+; VI-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; VI-NEXT: v_mov_b32_e32 v7, s1
+; VI-NEXT: v_add_u32_e32 v6, vcc, s0, v6
+; VI-NEXT: v_addc_u32_e32 v7, vcc, 0, v7, vcc
+; VI-NEXT: flat_load_dword v8, v[0:1] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v2, v[2:3] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v3, v[4:5] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_load_dword v4, v[6:7] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v0, s8
+; VI-NEXT: v_mov_b32_e32 v1, s9
+; VI-NEXT: v_trunc_f32_e32 v5, v8
+; VI-NEXT: v_fma_f32 v2, -v5, v2, v3
+; VI-NEXT: v_mul_f32_e32 v3, v5, v4
+; VI-NEXT: flat_store_dword v[0:1], v2
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: flat_store_dword v[0:1], v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2589,16 +7732,18 @@ define amdgpu_kernel void @multi_use_cost_to_fold_into_src(ptr addrspace(1) %out
; undone by the generic combine to pull the fneg out of the fma if
; !isFNegFree. We were reporting false for v2f32 even though it will
; be split into f32 where it will be free.
-; GCN-LABEL: {{^}}fneg_fma_fneg_dagcombine_loop:
-; GCN: s_brev_b32 [[NEGZERO:s[0-9]+]], 1{{$}}
-; GCN-DAG: v_fma_f32 [[FMA0:v[0-9]+]], v2, -v4, [[NEGZERO]]
-; GCN-DAG: v_fma_f32 [[FMA1:v[0-9]+]], v3, -v5, [[NEGZERO]]
-; GCN-DAG: v_sub_f32_e32 [[SUB0:v[0-9]+]], [[FMA0]], v0
-; GCN-DAG: v_sub_f32_e32 [[SUB1:v[0-9]+]], [[FMA1]], v1
-; GCN-DAG: v_mul_f32_e32 v0, [[SUB0]], v4
-; GCN-DAG: v_mul_f32_e32 v1, [[SUB1]], v5
-; GCN: s_setpc_b64
define <2 x float> @fneg_fma_fneg_dagcombine_loop(<2 x float> %arg, <2 x float> %arg1, <2 x float> %arg2) #0 {
+; GCN-LABEL: fneg_fma_fneg_dagcombine_loop:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: s_brev_b32 s4, 1
+; GCN-NEXT: v_fma_f32 v3, v3, -v5, s4
+; GCN-NEXT: v_fma_f32 v2, v2, -v4, s4
+; GCN-NEXT: v_sub_f32_e32 v1, v3, v1
+; GCN-NEXT: v_sub_f32_e32 v0, v2, v0
+; GCN-NEXT: v_mul_f32_e32 v0, v0, v4
+; GCN-NEXT: v_mul_f32_e32 v1, v1, v5
+; GCN-NEXT: s_setpc_b64 s[30:31]
bb:
%i3 = call fast <2 x float> @llvm.fma.v2f32(<2 x float> %arg1, <2 x float> %arg2, <2 x float> zeroinitializer)
%i4 = fadd fast <2 x float> %i3, %arg
@@ -2608,10 +7753,12 @@ bb:
}
; This expects denormal flushing, so can't turn this fmul into fneg
-; GCN-LABEL: {{^}}nnan_fmul_neg1_to_fneg:
-; GCN: s_waitcnt
-; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
+; GCN-LABEL: nnan_fmul_neg1_to_fneg:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
%mul = fmul float %x, -1.0
%add = fmul nnan float %mul, %y
ret float %add
@@ -2619,45 +7766,52 @@ define float @nnan_fmul_neg1_to_fneg(float %x, float %y) #0 {
; It's legal to turn this fmul into an fneg since denormals are
; preserved and we know an snan can't happen from the flag.
-; GCN-LABEL: {{^}}denormal_fmul_neg1_to_fneg:
-; GCN: v_mul_f32_e64 v0, -v0, v1
-; GCN-NEXT: s_setpc_b64
define float @denormal_fmul_neg1_to_fneg(float %x, float %y) {
+; GCN-LABEL: denormal_fmul_neg1_to_fneg:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
%mul = fmul nnan float %x, -1.0
%add = fmul float %mul, %y
ret float %add
}
; know the source can't be an snan
-; GCN-LABEL: {{^}}denorm_snan_fmul_neg1_to_fneg:
-; GCN: s_waitcnt
-; GCN-NEXT: v_mul_f32_e64 [[TMP:v[0-9]+]], v0, -v0
-; GCN-NEXT: v_mul_f32_e32 v0, [[TMP]], v1
-; GCN-NEXT: s_setpc_b64
define float @denorm_snan_fmul_neg1_to_fneg(float %x, float %y) {
+; GCN-LABEL: denorm_snan_fmul_neg1_to_fneg:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e64 v0, v0, -v0
+; GCN-NEXT: v_mul_f32_e32 v0, v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
%canonical = fmul float %x, %x
%mul = fmul float %canonical, -1.0
%add = fmul float %mul, %y
ret float %add
}
-; GCN-LABEL: {{^}}flush_snan_fmul_neg1_to_fneg:
-; GCN: s_waitcnt
-; GCN-NEXT: v_mul_f32_e32 [[TMP:v[0-9]+]], 1.0, v0
-; GCN-NEXT: v_mul_f32_e64 v0, -[[TMP]], v1
define float @flush_snan_fmul_neg1_to_fneg(float %x, float %y) #0 {
+; GCN-LABEL: flush_snan_fmul_neg1_to_fneg:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mul_f32_e32 v0, 1.0, v0
+; GCN-NEXT: v_mul_f32_e64 v0, -v0, v1
+; GCN-NEXT: s_setpc_b64 s[30:31]
%quiet = call float @llvm.canonicalize.f32(float %x)
%mul = fmul float %quiet, -1.0
%add = fmul float %mul, %y
ret float %add
}
-; GCN-LABEL: {{^}}fadd_select_fneg_fneg_f32:
-; GCN: v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
-; GCN-NEXT: v_sub_f32_e32 v0, v3, v0
-; GCN-NEXT: s_setpc_b64
define float @fadd_select_fneg_fneg_f32(i32 %arg0, float %x, float %y, float %z) {
+; GCN-LABEL: fadd_select_fneg_fneg_f32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT: v_sub_f32_e32 v0, v3, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %arg0, 0
%neg.x = fneg float %x
%neg.y = fneg float %y
@@ -2666,13 +7820,15 @@ define float @fadd_select_fneg_fneg_f32(i32 %arg0, float %x, float %y, float %z)
ret float %add
}
-; GCN-LABEL: {{^}}fadd_select_fneg_fneg_f64:
-; GCN: v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
-; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
-; GCN-NEXT: v_add_f64 v[0:1], v[5:6], -v[1:2]
-; GCN-NEXT: s_setpc_b64
define double @fadd_select_fneg_fneg_f64(i32 %arg0, double %x, double %y, double %z) {
+; GCN-LABEL: fadd_select_fneg_fneg_f64:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GCN-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GCN-NEXT: v_add_f64 v[0:1], v[5:6], -v[1:2]
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %arg0, 0
%neg.x = fneg double %x
%neg.y = fneg double %y
@@ -2681,23 +7837,28 @@ define double @fadd_select_fneg_fneg_f64(i32 %arg0, double %x, double %y, double
ret double %add
}
-; GCN-LABEL: {{^}}fadd_select_fneg_fneg_f16:
-; SI: v_cvt_f16_f32
-; SI: v_cvt_f16_f32
-; SI: v_cvt_f16_f32
-; SI: v_cmp_eq_u32
-; SI: v_cvt_f32_f16
-; SI: v_cvt_f32_f16
-; SI: v_cvt_f32_f16
-; SI: v_cndmask_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}, vcc
-; SI-NEXT: v_sub_f32_e32
-; SI-NEXT: s_setpc_b64
-
-; VI: v_cmp_eq_u32_e32 vcc, 0, v0
-; VI-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
-; VI-NEXT: v_sub_f16_e32 v0, v3, v0
-; VI-NEXT: s_setpc_b64
define half @fadd_select_fneg_fneg_f16(i32 %arg0, half %x, half %y, half %z) {
+; SI-LABEL: fadd_select_fneg_fneg_f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; SI-NEXT: v_sub_f32_e32 v0, v3, v0
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: fadd_select_fneg_fneg_f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; VI-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; VI-NEXT: v_sub_f16_e32 v0, v3, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %arg0, 0
%neg.x = fneg half %x
%neg.y = fneg half %y
@@ -2707,29 +7868,40 @@ define half @fadd_select_fneg_fneg_f16(i32 %arg0, half %x, half %y, half %z) {
}
; FIXME: Terrible code for SI
-; GCN-LABEL: {{^}}fadd_select_fneg_fneg_v2f16:
-; SI: v_cvt_f16_f32
-; SI: v_cvt_f16_f32
-; SI: v_cvt_f16_f32
-; SI: v_cvt_f16_f32
-; SI: v_cmp_eq_u32
-; SI: v_lshlrev_b32_e32
-; SI: v_or_b32_e32
-; SI: v_cndmask_b32
-; SI: v_lshrrev_b32
-; SI: v_cvt_f32_f16
-; SI: v_cvt_f32_f16
-; SI: v_cvt_f32_f16
-; SI: v_cvt_f32_f16
-; SI: v_sub_f32
-; SI: v_sub_f32
-
-; VI: v_cmp_eq_u32_e32 vcc, 0, v0
-; VI-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
-; VI-NEXT: v_sub_f16_sdwa v1, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
-; VI-NEXT: v_sub_f16_e32 v0, v3, v0
-; VI-NEXT: v_or_b32_e32 v0, v0, v1
define <2 x half> @fadd_select_fneg_fneg_v2f16(i32 %arg0, <2 x half> %x, <2 x half> %y, <2 x half> %z) {
+; SI-LABEL: fadd_select_fneg_fneg_v2f16:
+; SI: ; %bb.0:
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_or_b32_e32 v1, v1, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v6
+; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_sub_f32_e32 v0, v4, v0
+; SI-NEXT: v_sub_f32_e32 v1, v3, v1
+; SI-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: fadd_select_fneg_fneg_v2f16:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; VI-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; VI-NEXT: v_sub_f16_sdwa v1, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
+; VI-NEXT: v_sub_f16_e32 v0, v3, v0
+; VI-NEXT: v_or_b32_e32 v0, v0, v1
+; VI-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %arg0, 0
%neg.x = fneg <2 x half> %x
%neg.y = fneg <2 x half> %y
@@ -2739,13 +7911,14 @@ define <2 x half> @fadd_select_fneg_fneg_v2f16(i32 %arg0, <2 x half> %x, <2 x ha
}
; FIXME: This fneg should fold into select
-; GCN-LABEL: {{^}}v_fneg_select_f32:
-; GCN: s_waitcnt
-; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
-; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-; GCN-NEXT: s_setpc_b64
define float @v_fneg_select_f32(i32 %arg0, float %a, float %b, float %c) {
+; GCN-LABEL: v_fneg_select_f32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cond = icmp eq i32 %arg0, 0
%select = select i1 %cond, float %a, float %b
%fneg = fneg float %select
@@ -2753,22 +7926,16 @@ define float @v_fneg_select_f32(i32 %arg0, float %a, float %b, float %c) {
}
; FIXME: This fneg should fold into select
-; GCN-LABEL: {{^}}v_fneg_select_2_f32:
-; GCN: s_waitcnt
-; GCN-NSZ-NEXT: v_add_f32_e32 [[ADD2:v[0-9]+]], 2.0, v1
-; GCN-NSZ-NEXT: v_add_f32_e32 [[ADD4:v[0-9]+]], 4.0, v2
-; GCN-NSZ-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-NSZ-NEXT: v_cndmask_b32_e32 v0, [[ADD4]], [[ADD2]], vcc
-; GCN-NSZ-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-
-; GCN-SAFE-NEXT: v_add_f32_e32 [[ADD2:v[0-9]+]], 2.0, v1
-; GCN-SAFE-NEXT: v_add_f32_e32 [[ADD4:v[0-9]+]], 4.0, v2
-; GCN-SAFE-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GCN-SAFE-NEXT: v_cndmask_b32_e32 v0, [[ADD4]], [[ADD2]], vcc
-; GCN-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
-
-; GCN-NEXT: s_setpc_b64
define float @v_fneg_select_2_f32(i32 %arg0, float %a, float %b, float %c) {
+; GCN-LABEL: v_fneg_select_2_f32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_add_f32_e32 v1, 2.0, v1
+; GCN-NEXT: v_add_f32_e32 v2, 4.0, v2
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
+; GCN-NEXT: v_cndmask_b32_e32 v0, v2, v1, vcc
+; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
%cond = icmp eq i32 %arg0, 0
%add.0 = fadd float %a, 2.0
%add.1 = fadd float %b, 4.0
@@ -2777,11 +7944,44 @@ define float @v_fneg_select_2_f32(i32 %arg0, float %a, float %b, float %c) {
ret float %neg.select
}
-; GCN-LABEL: {{^}}v_fneg_posk_select_f32:
-; GCN: v_cmp_ne_u32_e32 vcc, 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v{{[0-9]+}}, 4.0, v{{[0-9]+}}, vcc
-; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
define amdgpu_kernel void @v_fneg_posk_select_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) {
+; SI-LABEL: v_fneg_posk_select_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v3, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v4, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s1
+; SI-NEXT: v_add_i32_e32 v1, vcc, s0, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT: v_cndmask_b32_e32 v0, 4.0, v4, vcc
+; SI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; SI-NEXT: flat_store_dword v[1:2], v0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_posk_select_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v3, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v4, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s1
+; VI-NEXT: v_add_u32_e32 v1, vcc, s0, v3
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; VI-NEXT: v_cndmask_b32_e32 v0, 4.0, v4, vcc
+; VI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; VI-NEXT: flat_store_dword v[1:2], v0
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext
@@ -2794,11 +7994,44 @@ define amdgpu_kernel void @v_fneg_posk_select_f32(ptr addrspace(1) %out, ptr add
ret void
}
-; GCN-LABEL: {{^}}v_fneg_negk_select_f32:
-; GCN: v_cmp_ne_u32_e32 vcc, 0, v0
-; GCN-NEXT: v_cndmask_b32_e32 v{{[0-9]+}}, -4.0, v{{[0-9]+}}, vcc
-; GCN-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
define amdgpu_kernel void @v_fneg_negk_select_f32(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr) {
+; SI-LABEL: v_fneg_negk_select_f32:
+; SI: ; %bb.0:
+; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; SI-NEXT: v_lshlrev_b32_e32 v3, 2, v0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s3
+; SI-NEXT: v_add_i32_e32 v1, vcc, s2, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: flat_load_dword v4, v[1:2] glc
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v2, s1
+; SI-NEXT: v_add_i32_e32 v1, vcc, s0, v3
+; SI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; SI-NEXT: v_cndmask_b32_e32 v0, -4.0, v4, vcc
+; SI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; SI-NEXT: flat_store_dword v[1:2], v0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: v_fneg_negk_select_f32:
+; VI: ; %bb.0:
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; VI-NEXT: v_lshlrev_b32_e32 v3, 2, v0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s3
+; VI-NEXT: v_add_u32_e32 v1, vcc, s2, v3
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: flat_load_dword v4, v[1:2] glc
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v2, s1
+; VI-NEXT: v_add_u32_e32 v1, vcc, s0, v3
+; VI-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; VI-NEXT: v_cndmask_b32_e32 v0, -4.0, v4, vcc
+; VI-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+; VI-NEXT: flat_store_dword v[1:2], v0
+; VI-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
%tid.ext = sext i32 %tid to i64
%a.gep = getelementptr inbounds float, ptr addrspace(1) %a.ptr, i64 %tid.ext