[llvm] 81589a3 - [AMDGPU] Regenerate test checks for mad24 tests (#162455)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Oct 8 05:36:54 PDT 2025
Author: Simon Pilgrim
Date: 2025-10-08T13:36:50+01:00
New Revision: 81589a39bff58e5a750ea7b656f66cf941803e3b
URL: https://github.com/llvm/llvm-project/commit/81589a39bff58e5a750ea7b656f66cf941803e3b
DIFF: https://github.com/llvm/llvm-project/commit/81589a39bff58e5a750ea7b656f66cf941803e3b.diff
LOG: [AMDGPU] Regenerate test checks for mad24 tests (#162455)
Added:
Modified:
llvm/test/CodeGen/AMDGPU/mad_int24.ll
llvm/test/CodeGen/AMDGPU/mad_uint24.ll
Removed:
################################################################################
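As the autogenerated NOTE headers below indicate, the new assertions were produced by utils/update_llc_test_checks.py. A typical invocation from an llvm-project checkout (assuming a freshly built llc is on PATH) looks like:

  # Regenerate the FileCheck lines for both mad24 tests in place
  llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/AMDGPU/mad_int24.ll llvm/test/CodeGen/AMDGPU/mad_uint24.ll
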
diff --git a/llvm/test/CodeGen/AMDGPU/mad_int24.ll b/llvm/test/CodeGen/AMDGPU/mad_int24.ll
index 93fda9479273c..dd883104cd799 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_int24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_int24.ll
@@ -1,17 +1,79 @@
-; RUN: llc < %s -mtriple=amdgcn | FileCheck %s --check-prefix=GCN --check-prefix=FUNC
-; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global | FileCheck %s --check-prefix=GCN --check-prefix=FUNC
-; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s --check-prefix=CM --check-prefix=FUNC
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=amdgcn| FileCheck %s --check-prefixes=GCN
+; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global | FileCheck %s --check-prefixes=VI
+; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s --check-prefixes=EG,R600,RW
+; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s --check-prefixes=EG,R600,CM
-; FUNC-LABEL: {{^}}i32_mad24:
; Signed 24-bit multiply is not supported on pre-Cayman GPUs.
-; EG: MULLO_INT
-; CM: MULLO_INT
-; GCN: s_bfe_i32
-; GCN: s_bfe_i32
-; GCN: s_mul_i32
-; GCN: s_add_i32
define amdgpu_kernel void @i32_mad24(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: i32_mad24:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_bfe_i32 s0, s0, 0x180000
+; GCN-NEXT: s_bfe_i32 s1, s1, 0x180000
+; GCN-NEXT: s_mul_i32 s0, s0, s1
+; GCN-NEXT: s_add_i32 s0, s0, s2
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; VI-LABEL: i32_mad24:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2c
+; VI-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
+; VI-NEXT: s_mov_b32 s7, 0xf000
+; VI-NEXT: s_mov_b32 s6, -1
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_bfe_i32 s0, s0, 0x180000
+; VI-NEXT: s_bfe_i32 s1, s1, 0x180000
+; VI-NEXT: s_mul_i32 s0, s0, s1
+; VI-NEXT: s_add_i32 s0, s0, s2
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; VI-NEXT: s_endpgm
+;
+; RW-LABEL: i32_mad24:
+; RW: ; %bb.0: ; %entry
+; RW-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
+; RW-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; RW-NEXT: CF_END
+; RW-NEXT: PAD
+; RW-NEXT: ALU clause starting at 4:
+; RW-NEXT: LSHL T0.W, KC0[2].Z, literal.x,
+; RW-NEXT: LSHL * T1.W, KC0[2].W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR T1.W, PS, literal.x,
+; RW-NEXT: ASHR * T0.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: MULLO_INT * T0.X, PS, PV.W,
+; RW-NEXT: ADD_INT T0.X, PS, KC0[3].X,
+; RW-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; RW-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: i32_mad24:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 12, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: ALU clause starting at 4:
+; CM-NEXT: LSHL T0.Z, KC0[2].Z, literal.x,
+; CM-NEXT: LSHL * T0.W, KC0[2].W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ASHR T1.Z, PV.W, literal.x,
+; CM-NEXT: ASHR * T0.W, PV.Z, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T1.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T1.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T1.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T1.Z,
+; CM-NEXT: ADD_INT * T0.X, PV.X, KC0[3].X,
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
entry:
%0 = shl i32 %a, 8
%a_24 = ashr i32 %0, 8
@@ -23,13 +85,25 @@ entry:
ret void
}
-; GCN-LABEL: {{^}}mad24_known_bits_destroyed:
-; GCN: s_waitcnt
-; GCN-NEXT: v_mad_i32_i24
-; GCN-NEXT: v_mul_i32_i24
-; GCN-NEXT: s_setpc_b64
define i32 @mad24_known_bits_destroyed(i32 %a, i32 %b, i32 %c) {
-
+; GCN-LABEL: mad24_known_bits_destroyed:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mad_i32_i24 v1, v0, v1, v2
+; GCN-NEXT: v_mul_i32_i24_e32 v0, v1, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: mad24_known_bits_destroyed:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mad_i32_i24 v1, v0, v1, v2
+; VI-NEXT: v_mul_i32_i24_e32 v0, v1, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; EG-LABEL: mad24_known_bits_destroyed:
+; EG: ; %bb.0:
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
%shl.0 = shl i32 %a, 8
%sra.0 = ashr i32 %shl.0, 8
%shl.1 = shl i32 %b, 8
@@ -48,12 +122,25 @@ define i32 @mad24_known_bits_destroyed(i32 %a, i32 %b, i32 %c) {
ret i32 %mul1
}
-; GCN-LABEL: {{^}}mad24_intrin_known_bits_destroyed:
-; GCN: s_waitcnt
-; GCN-NEXT: v_mad_i32_i24
-; GCN-NEXT: v_mul_i32_i24
-; GCN-NEXT: s_setpc_b64
define i32 @mad24_intrin_known_bits_destroyed(i32 %a, i32 %b, i32 %c) {
+; GCN-LABEL: mad24_intrin_known_bits_destroyed:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mad_i32_i24 v1, v0, v1, v2
+; GCN-NEXT: v_mul_i32_i24_e32 v0, v1, v0
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: mad24_intrin_known_bits_destroyed:
+; VI: ; %bb.0:
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mad_i32_i24 v1, v0, v1, v2
+; VI-NEXT: v_mul_i32_i24_e32 v0, v1, v0
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; EG-LABEL: mad24_intrin_known_bits_destroyed:
+; EG: ; %bb.0:
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
%shl.0 = shl i32 %a, 8
%sra.0 = ashr i32 %shl.0, 8
%shl.1 = shl i32 %b, 8
@@ -73,17 +160,177 @@ define i32 @mad24_intrin_known_bits_destroyed(i32 %a, i32 %b, i32 %c) {
}
; Make sure no unnecessary BFEs are emitted in the loop.
-; GCN-LABEL: {{^}}mad24_destroyed_knownbits_2:
-; GCN-NOT: v_bfe
-; GCN: v_mad_i32_i24
-; GCN-NOT: v_bfe
-; GCN: v_mad_i32_i24
-; GCN-NOT: v_bfe
-; GCN: v_mad_i32_i24
-; GCN-NOT: v_bfe
-; GCN: v_mad_i32_i24
-; GCN-NOT: v_bfe
define void @mad24_destroyed_knownbits_2(i32 %arg, i32 %arg1, i32 %arg2, ptr addrspace(1) %arg3) {
+; GCN-LABEL: mad24_destroyed_knownbits_2:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v5, 1
+; GCN-NEXT: s_mov_b64 s[4:5], 0
+; GCN-NEXT: .LBB3_1: ; %bb6
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: v_mad_i32_i24 v0, v0, v5, v5
+; GCN-NEXT: v_add_i32_e32 v1, vcc, -1, v1
+; GCN-NEXT: v_mad_i32_i24 v5, v0, v5, v0
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; GCN-NEXT: v_mad_i32_i24 v0, v5, v0, v5
+; GCN-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GCN-NEXT: v_mad_i32_i24 v0, v0, v5, v0
+; GCN-NEXT: v_mov_b32_e32 v5, v2
+; GCN-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GCN-NEXT: s_cbranch_execnz .LBB3_1
+; GCN-NEXT: ; %bb.2: ; %bb5
+; GCN-NEXT: s_or_b64 exec, exec, s[4:5]
+; GCN-NEXT: s_mov_b32 s6, 0
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s4, s6
+; GCN-NEXT: s_mov_b32 s5, s6
+; GCN-NEXT: buffer_store_dword v0, v[3:4], s[4:7], 0 addr64
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; VI-LABEL: mad24_destroyed_knownbits_2:
+; VI: ; %bb.0: ; %bb
+; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v5, 1
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_1: ; %bb6
+; VI-NEXT: ; =>This Inner Loop Header: Depth=1
+; VI-NEXT: v_mad_i32_i24 v0, v0, v5, v5
+; VI-NEXT: v_mad_i32_i24 v5, v0, v5, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, -1, v1
+; VI-NEXT: v_mad_i32_i24 v0, v5, v0, v5
+; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
+; VI-NEXT: v_mad_i32_i24 v0, v0, v5, v0
+; VI-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; VI-NEXT: v_mov_b32_e32 v5, v2
+; VI-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; VI-NEXT: s_cbranch_execnz .LBB3_1
+; VI-NEXT: ; %bb.2: ; %bb5
+; VI-NEXT: s_or_b64 exec, exec, s[4:5]
+; VI-NEXT: flat_store_dword v[3:4], v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: s_setpc_b64 s[30:31]
+;
+; RW-LABEL: mad24_destroyed_knownbits_2:
+; RW: ; %bb.0: ; %bb
+; RW-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; RW-NEXT: LOOP_START_DX10 @7
+; RW-NEXT: ALU_PUSH_BEFORE 30, @16, KC0[], KC1[]
+; RW-NEXT: JUMP @6 POP:1
+; RW-NEXT: LOOP_BREAK @6
+; RW-NEXT: POP @6 POP:1
+; RW-NEXT: END_LOOP @2
+; RW-NEXT: ALU 1, @47, KC0[], KC1[]
+; RW-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; RW-NEXT: CF_END
+; RW-NEXT: ALU clause starting at 10:
+; RW-NEXT: MOV T0.X, KC0[2].Y,
+; RW-NEXT: MOV T0.Y, KC0[2].Z,
+; RW-NEXT: MOV * T0.Z, KC0[2].W,
+; RW-NEXT: MOV T0.W, KC0[3].X,
+; RW-NEXT: MOV * T1.W, literal.x,
+; RW-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; RW-NEXT: ALU clause starting at 16:
+; RW-NEXT: LSHL T2.W, T1.W, literal.x,
+; RW-NEXT: LSHL * T3.W, T0.X, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR T3.W, PS, literal.x,
+; RW-NEXT: ASHR * T2.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: MULLO_INT * T0.X, PV.W, PS,
+; RW-NEXT: ADD_INT * T1.W, PS, T1.W,
+; RW-NEXT: LSHL * T3.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR * T3.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: MULLO_INT * T0.X, PV.W, T2.W,
+; RW-NEXT: ADD_INT * T1.W, PS, T1.W,
+; RW-NEXT: LSHL * T2.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR * T2.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: MULLO_INT * T0.X, PV.W, T3.W,
+; RW-NEXT: ADD_INT * T1.W, PS, T1.W,
+; RW-NEXT: LSHL * T3.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ASHR * T3.W, PV.W, literal.x,
+; RW-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; RW-NEXT: ADD_INT T0.Y, T0.Y, literal.x,
+; RW-NEXT: MULLO_INT * T0.X, PV.W, T2.W,
+; RW-NEXT: -1(nan), 0(0.000000e+00)
+; RW-NEXT: ADD_INT T0.X, PS, T1.W,
+; RW-NEXT: SETE_INT T2.W, PV.Y, 0.0,
+; RW-NEXT: MOV * T1.W, T0.Z,
+; RW-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; RW-NEXT: ALU clause starting at 47:
+; RW-NEXT: LSHR * T1.X, T0.W, literal.x,
+; RW-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: mad24_destroyed_knownbits_2:
+; CM: ; %bb.0: ; %bb
+; CM-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; CM-NEXT: LOOP_START_DX10 @7
+; CM-NEXT: ALU_PUSH_BEFORE 41, @16, KC0[], KC1[]
+; CM-NEXT: JUMP @6 POP:1
+; CM-NEXT: LOOP_BREAK @6
+; CM-NEXT: POP @6 POP:1
+; CM-NEXT: END_LOOP @2
+; CM-NEXT: ALU 1, @58, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T1.X, T0.X
+; CM-NEXT: CF_END
+; CM-NEXT: ALU clause starting at 10:
+; CM-NEXT: MOV * T1.X, KC0[2].Y,
+; CM-NEXT: MOV T0.X, KC0[2].Z,
+; CM-NEXT: MOV T0.Y, KC0[2].W,
+; CM-NEXT: MOV T0.Z, KC0[3].X,
+; CM-NEXT: MOV * T0.W, literal.x,
+; CM-NEXT: 1(1.401298e-45), 0(0.000000e+00)
+; CM-NEXT: ALU clause starting at 16:
+; CM-NEXT: LSHL T1.Z, T0.W, literal.x,
+; CM-NEXT: LSHL * T1.W, T1.X, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ASHR T2.Z, PV.W, literal.x,
+; CM-NEXT: ASHR * T1.W, PV.Z, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T1.X, T2.Z, T1.W,
+; CM-NEXT: MULLO_INT T1.Y (MASKED), T2.Z, T1.W,
+; CM-NEXT: MULLO_INT T1.Z (MASKED), T2.Z, T1.W,
+; CM-NEXT: MULLO_INT * T1.W (MASKED), T2.Z, T1.W,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.W,
+; CM-NEXT: LSHL * T2.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ASHR * T2.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T1.X, T2.W, T1.W,
+; CM-NEXT: MULLO_INT T1.Y (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT T1.Z (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT * T1.W (MASKED), T2.W, T1.W,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.W,
+; CM-NEXT: LSHL * T1.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ASHR * T1.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T1.X, T1.W, T2.W,
+; CM-NEXT: MULLO_INT T1.Y (MASKED), T1.W, T2.W,
+; CM-NEXT: MULLO_INT T1.Z (MASKED), T1.W, T2.W,
+; CM-NEXT: MULLO_INT * T1.W (MASKED), T1.W, T2.W,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.W,
+; CM-NEXT: LSHL * T2.W, PV.W, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: ADD_INT T0.X, T0.X, literal.x,
+; CM-NEXT: ASHR * T2.W, PV.W, literal.y,
+; CM-NEXT: -1(nan), 8(1.121039e-44)
+; CM-NEXT: MULLO_INT T1.X, T2.W, T1.W,
+; CM-NEXT: MULLO_INT T1.Y (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT T1.Z (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT * T1.W (MASKED), T2.W, T1.W,
+; CM-NEXT: ADD_INT T1.X, PV.X, T0.W,
+; CM-NEXT: SETE_INT T1.Z, T0.X, 0.0,
+; CM-NEXT: MOV * T0.W, T0.Y,
+; CM-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.Z, 0.0,
+; CM-NEXT: ALU clause starting at 58:
+; CM-NEXT: LSHR * T0.X, T0.Z, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
bb:
br label %bb6
@@ -119,3 +366,5 @@ bb6: ; preds = %bb6, %bb
}
declare i32 @llvm.amdgcn.mul.i24(i32, i32)
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; R600: {{.*}}
diff --git a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
index a6d458eeb0978..46b8df4b4537e 100644
--- a/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad_uint24.ll
@@ -1,19 +1,75 @@
-; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
-; RUN: llc < %s -mtriple=amdgcn | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN
-; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2
-; RUN: llc < %s -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 6
+; RUN: llc < %s -mtriple=r600 -mcpu=redwood | FileCheck %s --check-prefixes=EG
+; RUN: llc < %s -mtriple=r600 -mcpu=cayman | FileCheck %s --check-prefixes=CM
+; RUN: llc < %s -mtriple=amdgcn | FileCheck %s --check-prefixes=GCN
+; RUN: llc < %s -mtriple=amdgcn -mcpu=tonga -mattr=-flat-for-global | FileCheck %s --check-prefixes=GFX8,SI
+; RUN: llc < %s -mtriple=amdgcn -mcpu=fiji -mattr=-flat-for-global | FileCheck %s --check-prefixes=GFX8,VI
declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
-; FUNC-LABEL: {{^}}u32_mad24:
-; EG: MULLO_INT
-; SI: s_mul_i32
-; SI: s_add_i32
-; VI: s_mul_{{[iu]}}32
-; VI: s_add_{{[iu]}}32
-
define amdgpu_kernel void @u32_mad24(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c) {
+; EG-LABEL: u32_mad24:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 6, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: ALU clause starting at 4:
+; EG-NEXT: AND_INT T0.W, KC0[2].W, literal.x,
+; EG-NEXT: AND_INT * T1.W, KC0[2].Z, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.X, PS, PV.W,
+; EG-NEXT: ADD_INT T0.X, PS, KC0[3].X,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: u32_mad24:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: ALU clause starting at 4:
+; CM-NEXT: AND_INT T0.Z, KC0[2].W, literal.x,
+; CM-NEXT: AND_INT * T0.W, KC0[2].Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T0.Z,
+; CM-NEXT: ADD_INT * T0.X, PV.X, KC0[3].X,
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: u32_mad24:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_and_b32 s0, s0, 0xffffff
+; GCN-NEXT: s_and_b32 s1, s1, 0xffffff
+; GCN-NEXT: s_mul_i32 s0, s0, s1
+; GCN-NEXT: s_add_i32 s0, s0, s2
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s0
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: u32_mad24:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x2c
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_and_b32 s0, s0, 0xffffff
+; GFX8-NEXT: s_and_b32 s1, s1, 0xffffff
+; GFX8-NEXT: s_mul_i32 s0, s0, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s2
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_endpgm
entry:
%0 = shl i32 %a, 8
%a_24 = lshr i32 %0, 8
@@ -25,18 +81,88 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}i16_mad24:
; The order of A and B does not matter.
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 16
-; GCN: s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
-; GCN: s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
-; GCN: s_sext_i32_i16 [[EXT:s[0-9]]], [[MAD]]
-; GCN: v_mov_b32_e32 v0, [[EXT]]
define amdgpu_kernel void @i16_mad24(ptr addrspace(1) %out, i16 %a, i16 %b, i16 %c) {
+; EG-LABEL: i16_mad24:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 0, @12, KC0[], KC1[]
+; EG-NEXT: TEX 2 @6
+; EG-NEXT: ALU 4, @13, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_16 T1.X, T0.X, 40, #3
+; EG-NEXT: VTX_READ_16 T2.X, T0.X, 42, #3
+; EG-NEXT: VTX_READ_16 T0.X, T0.X, 44, #3
+; EG-NEXT: ALU clause starting at 12:
+; EG-NEXT: MOV * T0.X, 0.0,
+; EG-NEXT: ALU clause starting at 13:
+; EG-NEXT: MULLO_INT * T0.Y, T1.X, T2.X,
+; EG-NEXT: ADD_INT * T0.W, PS, T0.X,
+; EG-NEXT: BFE_INT T0.X, PV.W, 0.0, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 16(2.242078e-44), 2(2.802597e-45)
+;
+; CM-LABEL: i16_mad24:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 0, @12, KC0[], KC1[]
+; CM-NEXT: TEX 2 @6
+; CM-NEXT: ALU 8, @13, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 6:
+; CM-NEXT: VTX_READ_16 T1.X, T0.X, 40, #3
+; CM-NEXT: VTX_READ_16 T2.X, T0.X, 42, #3
+; CM-NEXT: VTX_READ_16 T0.X, T0.X, 44, #3
+; CM-NEXT: ALU clause starting at 12:
+; CM-NEXT: MOV * T0.X, 0.0,
+; CM-NEXT: ALU clause starting at 13:
+; CM-NEXT: MULLO_INT T0.X (MASKED), T1.X, T2.X,
+; CM-NEXT: MULLO_INT T0.Y, T1.X, T2.X,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T1.X, T2.X,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T1.X, T2.X,
+; CM-NEXT: ADD_INT * T0.W, PV.Y, T0.X,
+; CM-NEXT: BFE_INT * T0.X, PV.W, 0.0, literal.x,
+; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i16_mad24:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
+; GCN-NEXT: s_load_dword s4, s[4:5], 0xb
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_lshr_b32 s2, s2, 16
+; GCN-NEXT: s_mul_i32 s2, s4, s2
+; GCN-NEXT: s_add_i32 s2, s2, s3
+; GCN-NEXT: s_sext_i32_i16 s2, s2
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: s_mov_b32 s4, s0
+; GCN-NEXT: s_mov_b32 s5, s1
+; GCN-NEXT: v_mov_b32_e32 v0, s2
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: i16_mad24:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
+; GFX8-NEXT: s_load_dword s8, s[4:5], 0x2c
+; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s4, s0
+; GFX8-NEXT: s_lshr_b32 s0, s2, 16
+; GFX8-NEXT: s_mul_i32 s0, s8, s0
+; GFX8-NEXT: s_add_i32 s0, s0, s3
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_mov_b32 s5, s1
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_endpgm
entry:
%0 = mul i16 %a, %b
%1 = add i16 %0, %c
@@ -46,17 +172,85 @@ entry:
}
; FIXME: Need to handle non-uniform case for function below (load without gep).
-; FUNC-LABEL: {{^}}i8_mad24:
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 8
-; GCN: s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
-; GCN: s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
-; GCN: s_sext_i32_i8 [[EXT:s[0-9]]], [[MAD]]
-; GCN: v_mov_b32_e32 v0, [[EXT]]
define amdgpu_kernel void @i8_mad24(ptr addrspace(1) %out, i8 %a, i8 %b, i8 %c) {
+; EG-LABEL: i8_mad24:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 0, @12, KC0[], KC1[]
+; EG-NEXT: TEX 2 @6
+; EG-NEXT: ALU 4, @13, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 6:
+; EG-NEXT: VTX_READ_8 T1.X, T0.X, 40, #3
+; EG-NEXT: VTX_READ_8 T2.X, T0.X, 41, #3
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 42, #3
+; EG-NEXT: ALU clause starting at 12:
+; EG-NEXT: MOV * T0.X, 0.0,
+; EG-NEXT: ALU clause starting at 13:
+; EG-NEXT: MULLO_INT * T0.Y, T1.X, T2.X,
+; EG-NEXT: ADD_INT * T0.W, PS, T0.X,
+; EG-NEXT: BFE_INT T0.X, PV.W, 0.0, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 8(1.121039e-44), 2(2.802597e-45)
+;
+; CM-LABEL: i8_mad24:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 0, @12, KC0[], KC1[]
+; CM-NEXT: TEX 2 @6
+; CM-NEXT: ALU 8, @13, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 6:
+; CM-NEXT: VTX_READ_8 T1.X, T0.X, 40, #3
+; CM-NEXT: VTX_READ_8 T2.X, T0.X, 41, #3
+; CM-NEXT: VTX_READ_8 T0.X, T0.X, 42, #3
+; CM-NEXT: ALU clause starting at 12:
+; CM-NEXT: MOV * T0.X, 0.0,
+; CM-NEXT: ALU clause starting at 13:
+; CM-NEXT: MULLO_INT T0.X (MASKED), T1.X, T2.X,
+; CM-NEXT: MULLO_INT T0.Y, T1.X, T2.X,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T1.X, T2.X,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T1.X, T2.X,
+; CM-NEXT: ADD_INT * T0.W, PV.Y, T0.X,
+; CM-NEXT: BFE_INT * T0.X, PV.W, 0.0, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i8_mad24:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dword s2, s[4:5], 0xb
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_lshr_b32 s4, s2, 8
+; GCN-NEXT: s_lshr_b32 s5, s2, 16
+; GCN-NEXT: s_mul_i32 s2, s2, s4
+; GCN-NEXT: s_add_i32 s2, s2, s5
+; GCN-NEXT: s_sext_i32_i8 s4, s2
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: i8_mad24:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dword s6, s[4:5], 0x2c
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_lshr_b32 s4, s6, 8
+; GFX8-NEXT: s_lshr_b32 s5, s6, 16
+; GFX8-NEXT: s_mul_i32 s4, s6, s4
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: s_sext_i32_i8 s4, s4
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX8-NEXT: s_endpgm
entry:
%0 = mul i8 %a, %b
%1 = add i8 %0, %c
@@ -72,11 +266,75 @@ entry:
; 24-bit mad pattern wasn't being matched.
; Check that the select instruction is not deleted.
-; FUNC-LABEL: {{^}}i24_i32_i32_mad:
-; EG: CNDE_INT
-; SI: s_cselect
-; GCN2: s_cselect
define amdgpu_kernel void @i24_i32_i32_mad(ptr addrspace(1) %out, i32 %a, i32 %b, i32 %c, i32 %d) {
+; EG-LABEL: i24_i32_i32_mad:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 7, @4, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: ALU clause starting at 4:
+; EG-NEXT: ASHR * T0.W, KC0[2].Z, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: CNDE_INT * T0.W, KC0[3].X, literal.x, PV.W,
+; EG-NEXT: 34(4.764415e-44), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.X, PV.W, KC0[3].X,
+; EG-NEXT: ADD_INT T0.X, PS, KC0[3].Y,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: i24_i32_i32_mad:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 10, @4, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: ALU clause starting at 4:
+; CM-NEXT: ASHR * T0.W, KC0[2].Z, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: CNDE_INT * T0.W, KC0[3].X, literal.x, PV.W,
+; CM-NEXT: 34(4.764415e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, KC0[3].X,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, KC0[3].X,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, KC0[3].X,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, KC0[3].X,
+; CM-NEXT: ADD_INT * T0.X, PV.X, KC0[3].Y,
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i24_i32_i32_mad:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_load_dword s2, s[4:5], 0xb
+; GCN-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s3, 0xf000
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_ashr_i32 s2, s2, 8
+; GCN-NEXT: s_cmp_lg_u32 s6, 0
+; GCN-NEXT: s_cselect_b32 s2, s2, 34
+; GCN-NEXT: s_mul_i32 s2, s2, s6
+; GCN-NEXT: s_add_i32 s4, s2, s7
+; GCN-NEXT: s_mov_b32 s2, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s4
+; GCN-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: i24_i32_i32_mad:
+; GFX8: ; %bb.0: ; %entry
+; GFX8-NEXT: s_load_dword s8, s[4:5], 0x2c
+; GFX8-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s3, 0xf000
+; GFX8-NEXT: s_mov_b32 s2, -1
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_ashr_i32 s4, s8, 8
+; GFX8-NEXT: s_cmp_lg_u32 s6, 0
+; GFX8-NEXT: s_cselect_b32 s4, s4, 34
+; GFX8-NEXT: s_mul_i32 s4, s4, s6
+; GFX8-NEXT: s_add_i32 s4, s4, s7
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; GFX8-NEXT: s_endpgm
entry:
%0 = ashr i32 %a, 8
%1 = icmp ne i32 %c, 0
@@ -87,13 +345,139 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}extra_and:
-; SI-NOT: v_and
-; SI: s_mul_i32
-; SI: s_mul_i32
-; SI: s_add_i32
-; SI: s_add_i32
define amdgpu_kernel void @extra_and(ptr addrspace(1) %arg, i32 %arg2, i32 %arg3) {
+; EG-LABEL: extra_and:
+; EG: ; %bb.0: ; %bb
+; EG-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: LOOP_START_DX10 @7
+; EG-NEXT: ALU_PUSH_BEFORE 12, @16, KC0[], KC1[]
+; EG-NEXT: JUMP @6 POP:1
+; EG-NEXT: LOOP_BREAK @6
+; EG-NEXT: POP @6 POP:1
+; EG-NEXT: END_LOOP @2
+; EG-NEXT: ALU 1, @29, KC0[], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T1.W, literal.x,
+; EG-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; EG-NEXT: MOV * T3.W, PV.W,
+; EG-NEXT: MOV T0.Z, KC0[2].Y,
+; EG-NEXT: MOV T0.W, KC0[2].Z,
+; EG-NEXT: MOV * T2.W, KC0[2].W,
+; EG-NEXT: ALU clause starting at 16:
+; EG-NEXT: AND_INT T1.W, T1.W, literal.x,
+; EG-NEXT: AND_INT * T4.W, T3.W, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: AND_INT T3.W, T3.W, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, PS, PV.W,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.W,
+; EG-NEXT: ADD_INT T3.W, T2.W, PS,
+; EG-NEXT: ADD_INT * T1.W, T0.W, T0.X,
+; EG-NEXT: ADD_INT * T0.X, PS, PV.W,
+; EG-NEXT: SETNE_INT * T4.W, PV.X, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: PRED_SETE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; EG-NEXT: ALU clause starting at 29:
+; EG-NEXT: LSHR * T1.X, T0.Z, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: extra_and:
+; CM: ; %bb.0: ; %bb
+; CM-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; CM-NEXT: LOOP_START_DX10 @7
+; CM-NEXT: ALU_PUSH_BEFORE 17, @16, KC0[], KC1[]
+; CM-NEXT: JUMP @6 POP:1
+; CM-NEXT: LOOP_BREAK @6
+; CM-NEXT: POP @6 POP:1
+; CM-NEXT: END_LOOP @2
+; CM-NEXT: ALU 1, @34, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: ALU clause starting at 10:
+; CM-NEXT: MOV * T0.W, literal.x,
+; CM-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; CM-NEXT: MOV * T1.Z, PV.W,
+; CM-NEXT: MOV T0.Y, KC0[2].Y,
+; CM-NEXT: MOV T0.Z, KC0[2].Z,
+; CM-NEXT: MOV * T1.W, KC0[2].W,
+; CM-NEXT: ALU clause starting at 16:
+; CM-NEXT: AND_INT T1.Y, T1.Z, literal.x,
+; CM-NEXT: AND_INT T2.Z, T0.W, literal.x,
+; CM-NEXT: AND_INT * T0.W, T1.Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT * T0.W, T1.Y, T2.Z,
+; CM-NEXT: ADD_INT T1.Z, T1.W, PV.W,
+; CM-NEXT: ADD_INT * T0.W, T0.Z, T0.X,
+; CM-NEXT: ADD_INT * T0.X, PV.W, PV.Z,
+; CM-NEXT: SETNE_INT * T2.W, PV.X, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: PRED_SETE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; CM-NEXT: ALU clause starting at 34:
+; CM-NEXT: LSHR * T1.X, T0.Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: extra_and:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xb
+; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: s_mov_b32 s6, 0
+; GCN-NEXT: .LBB4_1: ; %bb4
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: s_and_b32 s3, s6, 0xffffff
+; GCN-NEXT: s_and_b32 s6, s6, 0xffffff
+; GCN-NEXT: s_and_b32 s2, s2, 0xffffff
+; GCN-NEXT: s_mul_i32 s3, s3, s2
+; GCN-NEXT: s_mul_i32 s6, s6, s2
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s2, s0, s3
+; GCN-NEXT: s_add_i32 s6, s1, s6
+; GCN-NEXT: s_add_i32 s3, s2, s6
+; GCN-NEXT: s_cmp_lg_u32 s3, 8
+; GCN-NEXT: s_cbranch_scc1 .LBB4_1
+; GCN-NEXT: ; %bb.2: ; %bb18
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s3
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: extra_and:
+; GFX8: ; %bb.0: ; %bb
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
+; GFX8-NEXT: s_mov_b32 s2, 0
+; GFX8-NEXT: s_mov_b32 s6, 0
+; GFX8-NEXT: .LBB4_1: ; %bb4
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_and_b32 s3, s6, 0xffffff
+; GFX8-NEXT: s_and_b32 s6, s6, 0xffffff
+; GFX8-NEXT: s_and_b32 s2, s2, 0xffffff
+; GFX8-NEXT: s_mul_i32 s3, s3, s2
+; GFX8-NEXT: s_mul_i32 s6, s6, s2
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_add_i32 s2, s0, s3
+; GFX8-NEXT: s_add_i32 s6, s1, s6
+; GFX8-NEXT: s_add_i32 s3, s2, s6
+; GFX8-NEXT: s_cmp_lg_u32 s3, 8
+; GFX8-NEXT: s_cbranch_scc1 .LBB4_1
+; GFX8-NEXT: ; %bb.2: ; %bb18
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s3
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_endpgm
bb:
br label %bb4
@@ -119,13 +503,139 @@ bb18: ; preds = %bb4
ret void
}
-; FUNC-LABEL: {{^}}dont_remove_shift
-; SI: s_lshr
-; SI: s_mul_i32
-; SI: s_mul_i32
-; SI: s_add_i32
-; SI: s_add_i32
define amdgpu_kernel void @dont_remove_shift(ptr addrspace(1) %arg, i32 %arg2, i32 %arg3) {
+; EG-LABEL: dont_remove_shift:
+; EG: ; %bb.0: ; %bb
+; EG-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; EG-NEXT: LOOP_START_DX10 @7
+; EG-NEXT: ALU_PUSH_BEFORE 12, @16, KC0[], KC1[]
+; EG-NEXT: JUMP @6 POP:1
+; EG-NEXT: LOOP_BREAK @6
+; EG-NEXT: POP @6 POP:1
+; EG-NEXT: END_LOOP @2
+; EG-NEXT: ALU 1, @29, KC0[], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: ALU clause starting at 10:
+; EG-NEXT: MOV * T1.W, literal.x,
+; EG-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; EG-NEXT: MOV * T3.W, PV.W,
+; EG-NEXT: MOV T0.Z, KC0[2].Y,
+; EG-NEXT: MOV T0.W, KC0[2].Z,
+; EG-NEXT: MOV * T2.W, KC0[2].W,
+; EG-NEXT: ALU clause starting at 16:
+; EG-NEXT: LSHR T1.W, T1.W, literal.x,
+; EG-NEXT: LSHR * T4.W, T3.W, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: LSHR T3.W, T3.W, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, PS, PV.W,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.W,
+; EG-NEXT: ADD_INT T3.W, T2.W, PS,
+; EG-NEXT: ADD_INT * T1.W, T0.W, T0.X,
+; EG-NEXT: ADD_INT * T0.X, PS, PV.W,
+; EG-NEXT: SETNE_INT * T4.W, PV.X, literal.x,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: PRED_SETE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; EG-NEXT: ALU clause starting at 29:
+; EG-NEXT: LSHR * T1.X, T0.Z, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: dont_remove_shift:
+; CM: ; %bb.0: ; %bb
+; CM-NEXT: ALU 5, @10, KC0[CB0:0-32], KC1[]
+; CM-NEXT: LOOP_START_DX10 @7
+; CM-NEXT: ALU_PUSH_BEFORE 17, @16, KC0[], KC1[]
+; CM-NEXT: JUMP @6 POP:1
+; CM-NEXT: LOOP_BREAK @6
+; CM-NEXT: POP @6 POP:1
+; CM-NEXT: END_LOOP @2
+; CM-NEXT: ALU 1, @34, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: ALU clause starting at 10:
+; CM-NEXT: MOV * T0.W, literal.x,
+; CM-NEXT: 0(0.000000e+00), 0(0.000000e+00)
+; CM-NEXT: MOV * T1.Z, PV.W,
+; CM-NEXT: MOV T0.Y, KC0[2].Y,
+; CM-NEXT: MOV T0.Z, KC0[2].Z,
+; CM-NEXT: MOV * T1.W, KC0[2].W,
+; CM-NEXT: ALU clause starting at 16:
+; CM-NEXT: LSHR T1.Y, T1.Z, literal.x,
+; CM-NEXT: LSHR T2.Z, T0.W, literal.x,
+; CM-NEXT: LSHR * T0.W, T1.Z, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T2.Z,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T1.Y, T2.Z,
+; CM-NEXT: MULLO_INT * T0.W, T1.Y, T2.Z,
+; CM-NEXT: ADD_INT T1.Z, T1.W, PV.W,
+; CM-NEXT: ADD_INT * T0.W, T0.Z, T0.X,
+; CM-NEXT: ADD_INT * T0.X, PV.W, PV.Z,
+; CM-NEXT: SETNE_INT * T2.W, PV.X, literal.x,
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: PRED_SETE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+; CM-NEXT: ALU clause starting at 34:
+; CM-NEXT: LSHR * T1.X, T0.Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: dont_remove_shift:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xb
+; GCN-NEXT: s_mov_b32 s2, 0
+; GCN-NEXT: s_mov_b32 s6, 0
+; GCN-NEXT: .LBB5_1: ; %bb4
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: s_lshr_b32 s3, s6, 8
+; GCN-NEXT: s_lshr_b32 s6, s6, 8
+; GCN-NEXT: s_lshr_b32 s2, s2, 8
+; GCN-NEXT: s_mul_i32 s3, s3, s2
+; GCN-NEXT: s_mul_i32 s6, s6, s2
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s2, s0, s3
+; GCN-NEXT: s_add_i32 s6, s1, s6
+; GCN-NEXT: s_add_i32 s3, s2, s6
+; GCN-NEXT: s_cmp_lg_u32 s3, 8
+; GCN-NEXT: s_cbranch_scc1 .LBB5_1
+; GCN-NEXT: ; %bb.2: ; %bb18
+; GCN-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x9
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s6, -1
+; GCN-NEXT: v_mov_b32_e32 v0, s3
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GCN-NEXT: s_endpgm
+;
+; GFX8-LABEL: dont_remove_shift:
+; GFX8: ; %bb.0: ; %bb
+; GFX8-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2c
+; GFX8-NEXT: s_mov_b32 s2, 0
+; GFX8-NEXT: s_mov_b32 s6, 0
+; GFX8-NEXT: .LBB5_1: ; %bb4
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: s_lshr_b32 s3, s6, 8
+; GFX8-NEXT: s_lshr_b32 s6, s6, 8
+; GFX8-NEXT: s_lshr_b32 s2, s2, 8
+; GFX8-NEXT: s_mul_i32 s3, s3, s2
+; GFX8-NEXT: s_mul_i32 s6, s6, s2
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: s_add_i32 s2, s0, s3
+; GFX8-NEXT: s_add_i32 s6, s1, s6
+; GFX8-NEXT: s_add_i32 s3, s2, s6
+; GFX8-NEXT: s_cmp_lg_u32 s3, 8
+; GFX8-NEXT: s_cbranch_scc1 .LBB5_1
+; GFX8-NEXT: ; %bb.2: ; %bb18
+; GFX8-NEXT: s_load_dwordx2 s[4:5], s[4:5], 0x24
+; GFX8-NEXT: s_mov_b32 s7, 0xf000
+; GFX8-NEXT: s_mov_b32 s6, -1
+; GFX8-NEXT: v_mov_b32_e32 v0, s3
+; GFX8-NEXT: s_waitcnt lgkmcnt(0)
+; GFX8-NEXT: buffer_store_dword v0, off, s[4:7], 0
+; GFX8-NEXT: s_endpgm
bb:
br label %bb4
@@ -151,19 +661,234 @@ bb18: ; preds = %bb4
ret void
}
-; FUNC-LABEL: {{^}}i8_mad_sat_16:
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 8
-; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; SI: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
-; SI: v_med3_i32 v{{[0-9]}}, [[EXT]],
-; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; VI: v_max_i16_e32 [[MAX:v[0-9]]], 0xff80, [[MAD]]
-; VI: v_min_i16_e32 {{v[0-9]}}, 0x7f, [[MAX]]
define amdgpu_kernel void @i8_mad_sat_16(ptr addrspace(1) %out, ptr addrspace(1) %in0, ptr addrspace(1) %in1, ptr addrspace(1) %in2, ptr addrspace(5) %idx) {
+; EG-LABEL: i8_mad_sat_16:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @8
+; EG-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @10
+; EG-NEXT: ALU 24, @21, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 8:
+; EG-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; EG-NEXT: Fetch clause starting at 10:
+; EG-NEXT: VTX_READ_8 T3.X, T3.X, 0, #1
+; EG-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; EG-NEXT: ALU clause starting at 14:
+; EG-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; EG-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; EG-NEXT: ADD_INT * T1.X, KC0[2].W, PV.X,
+; EG-NEXT: ALU clause starting at 19:
+; EG-NEXT: ADD_INT T2.X, KC0[2].Z, T0.X,
+; EG-NEXT: ADD_INT * T3.X, KC0[3].X, T0.X,
+; EG-NEXT: ALU clause starting at 21:
+; EG-NEXT: BFE_INT T0.Z, T1.X, 0.0, literal.x,
+; EG-NEXT: BFE_INT * T0.W, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_INT T1.W, T3.X, 0.0, literal.x,
+; EG-NEXT: MULLO_INT * T0.Y, PV.Z, PV.W,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x,
+; EG-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; EG-NEXT: MAX_INT T0.W, PV.W, literal.x,
+; EG-NEXT: ADD_INT * T1.W, KC0[2].Y, T0.X,
+; EG-NEXT: -128(nan), 0(0.000000e+00)
+; EG-NEXT: AND_INT T2.W, PS, literal.x,
+; EG-NEXT: MIN_INT * T0.W, PV.W, literal.y,
+; EG-NEXT: 3(4.203895e-45), 127(1.779649e-43)
+; EG-NEXT: AND_INT T0.W, PS, literal.x,
+; EG-NEXT: LSHL * T2.W, PV.W, literal.y,
+; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; EG-NEXT: LSHL T0.X, PV.W, PS,
+; EG-NEXT: LSHL * T0.W, literal.x, PS,
+; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; EG-NEXT: MOV T0.Y, 0.0,
+; EG-NEXT: MOV * T0.Z, 0.0,
+; EG-NEXT: LSHR * T1.X, T1.W, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; CM-LABEL: i8_mad_sat_16:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 0 @8
+; CM-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 1 @10
+; CM-NEXT: ALU 26, @21, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT MSKOR T1.XW, T0.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 8:
+; CM-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; CM-NEXT: Fetch clause starting at 10:
+; CM-NEXT: VTX_READ_8 T3.X, T3.X, 0, #1
+; CM-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; CM-NEXT: ALU clause starting at 14:
+; CM-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; CM-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; CM-NEXT: ADD_INT * T1.X, KC0[3].X, PV.X,
+; CM-NEXT: ALU clause starting at 19:
+; CM-NEXT: ADD_INT * T2.X, KC0[2].W, T0.X,
+; CM-NEXT: ADD_INT * T3.X, KC0[2].Z, T0.X,
+; CM-NEXT: ALU clause starting at 21:
+; CM-NEXT: BFE_INT T0.Y, T1.X, 0.0, literal.x,
+; CM-NEXT: BFE_INT T0.Z, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; CM-NEXT: BFE_INT * T0.W, T3.X, 0.0, literal.x, BS:VEC_201
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.Z, T0.W,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.Z, T0.W,
+; CM-NEXT: MULLO_INT T0.Z, T0.Z, T0.W,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.Z, T0.W,
+; CM-NEXT: ADD_INT * T0.W, PV.Z, T0.Y,
+; CM-NEXT: BFE_INT * T0.W, PV.W, 0.0, literal.x,
+; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; CM-NEXT: MAX_INT T0.Z, PV.W, literal.x,
+; CM-NEXT: ADD_INT * T0.W, KC0[2].Y, T0.X,
+; CM-NEXT: -128(nan), 0(0.000000e+00)
+; CM-NEXT: AND_INT T1.Z, PV.W, literal.x,
+; CM-NEXT: MIN_INT * T1.W, PV.Z, literal.y,
+; CM-NEXT: 3(4.203895e-45), 127(1.779649e-43)
+; CM-NEXT: AND_INT T0.Z, PV.W, literal.x,
+; CM-NEXT: LSHL * T1.W, PV.Z, literal.y,
+; CM-NEXT: 255(3.573311e-43), 3(4.203895e-45)
+; CM-NEXT: LSHL T1.X, PV.Z, PV.W,
+; CM-NEXT: LSHL * T1.W, literal.x, PV.W,
+; CM-NEXT: 255(3.573311e-43), 0(0.000000e+00)
+; CM-NEXT: MOV T1.Y, 0.0,
+; CM-NEXT: MOV * T1.Z, 0.0,
+; CM-NEXT: LSHR * T0.X, T0.W, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i8_mad_sat_16:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_mov_b32 s20, SCRATCH_RSRC_DWORD0
+; GCN-NEXT: s_mov_b32 s21, SCRATCH_RSRC_DWORD1
+; GCN-NEXT: s_mov_b32 s22, -1
+; GCN-NEXT: s_mov_b32 s23, 0xe8f000
+; GCN-NEXT: s_add_u32 s20, s20, s11
+; GCN-NEXT: s_addc_u32 s21, s21, 0
+; GCN-NEXT: s_load_dword s8, s[4:5], 0x11
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s9, s8, 4
+; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: buffer_load_dword v1, v1, s[20:23], 0 offen
+; GCN-NEXT: buffer_load_dword v0, v0, s[20:23], 0 offen
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s10, 0
+; GCN-NEXT: s_mov_b64 s[14:15], s[10:11]
+; GCN-NEXT: s_mov_b64 s[18:19], s[10:11]
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_mov_b64 s[8:9], s[2:3]
+; GCN-NEXT: s_mov_b64 s[12:13], s[4:5]
+; GCN-NEXT: s_mov_b64 s[16:17], s[6:7]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_sbyte v2, v[0:1], s[12:15], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v3, v[0:1], s[8:11], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v4, v[0:1], s[16:19], 0 addr64
+; GCN-NEXT: s_movk_i32 s2, 0xff80
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mad_u32_u24 v2, v2, v3, v4
+; GCN-NEXT: v_bfe_i32 v2, v2, 0, 16
+; GCN-NEXT: v_mov_b32_e32 v3, 0x7f
+; GCN-NEXT: v_med3_i32 v2, v2, s2, v3
+; GCN-NEXT: s_mov_b64 s[2:3], s[10:11]
+; GCN-NEXT: buffer_store_byte v2, v[0:1], s[0:3], 0 addr64
+; GCN-NEXT: s_endpgm
+;
+; SI-LABEL: i8_mad_sat_16:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
+; SI-NEXT: s_load_dword s0, s[4:5], 0x44
+; SI-NEXT: s_mov_b32 s89, SCRATCH_RSRC_DWORD1
+; SI-NEXT: s_mov_b32 s90, -1
+; SI-NEXT: s_mov_b32 s91, 0xe80000
+; SI-NEXT: s_add_u32 s88, s88, s11
+; SI-NEXT: s_addc_u32 s89, s89, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_add_i32 s1, s0, 4
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_load_dword v6, v0, s[88:91], 0 offen
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: buffer_load_dword v7, v0, s[88:91], 0 offen
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_mov_b32_e32 v5, s7
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_u32_e32 v0, vcc, s2, v6
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
+; SI-NEXT: v_add_u32_e32 v2, vcc, s4, v6
+; SI-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; SI-NEXT: v_add_u32_e32 v4, vcc, s6, v6
+; SI-NEXT: v_addc_u32_e32 v5, vcc, v5, v7, vcc
+; SI-NEXT: flat_load_sbyte v0, v[0:1]
+; SI-NEXT: flat_load_sbyte v1, v[2:3]
+; SI-NEXT: flat_load_sbyte v2, v[4:5]
+; SI-NEXT: v_mov_b32_e32 v3, s1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mad_u16 v0, v1, v0, v2
+; SI-NEXT: v_max_i16_e32 v0, 0xff80, v0
+; SI-NEXT: v_min_i16_e32 v2, 0x7f, v0
+; SI-NEXT: v_add_u32_e32 v0, vcc, s0, v6
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v3, v7, vcc
+; SI-NEXT: flat_store_byte v[0:1], v2
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: i8_mad_sat_16:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; VI-NEXT: s_load_dword s0, s[4:5], 0x44
+; VI-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; VI-NEXT: s_mov_b32 s14, -1
+; VI-NEXT: s_mov_b32 s15, 0xe80000
+; VI-NEXT: s_add_u32 s12, s12, s11
+; VI-NEXT: s_addc_u32 s13, s13, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_i32 s1, s0, 4
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: buffer_load_dword v6, v0, s[12:15], 0 offen
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: buffer_load_dword v7, v0, s[12:15], 0 offen
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_mov_b32_e32 v5, s7
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v6
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v7, vcc
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v6
+; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v6
+; VI-NEXT: v_addc_u32_e32 v5, vcc, v5, v7, vcc
+; VI-NEXT: flat_load_sbyte v0, v[0:1]
+; VI-NEXT: flat_load_sbyte v1, v[2:3]
+; VI-NEXT: flat_load_sbyte v2, v[4:5]
+; VI-NEXT: v_mov_b32_e32 v3, s1
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mad_u16 v0, v1, v0, v2
+; VI-NEXT: v_max_i16_e32 v0, 0xff80, v0
+; VI-NEXT: v_min_i16_e32 v2, 0x7f, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v6
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v3, v7, vcc
+; VI-NEXT: flat_store_byte v[0:1], v2
+; VI-NEXT: s_endpgm
entry:
%retval.0.i = load i64, ptr addrspace(5) %idx
%arrayidx = getelementptr inbounds i8, ptr addrspace(1) %in0, i64 %retval.0.i
@@ -187,16 +912,201 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}i8_mad_32:
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 8
-; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_32(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(5) %idx) {
+; EG-LABEL: i8_mad_32:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @8
+; EG-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @10
+; EG-NEXT: ALU 9, @21, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 8:
+; EG-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; EG-NEXT: Fetch clause starting at 10:
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; EG-NEXT: ALU clause starting at 14:
+; EG-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; EG-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; EG-NEXT: ADD_INT * T1.X, KC0[2].W, PV.X,
+; EG-NEXT: ALU clause starting at 19:
+; EG-NEXT: ADD_INT T2.X, KC0[2].Z, T0.X,
+; EG-NEXT: ADD_INT * T0.X, KC0[3].X, T0.X,
+; EG-NEXT: ALU clause starting at 21:
+; EG-NEXT: BFE_INT T0.Z, T1.X, 0.0, literal.x,
+; EG-NEXT: BFE_INT * T0.W, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_INT T1.W, T0.X, 0.0, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, PV.W, PV.Z,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: BFE_INT T0.X, PV.W, 0.0, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 16(2.242078e-44), 2(2.802597e-45)
+;
+; CM-LABEL: i8_mad_32:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 0 @8
+; CM-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 1 @10
+; CM-NEXT: ALU 12, @21, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 8:
+; CM-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; CM-NEXT: Fetch clause starting at 10:
+; CM-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; CM-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; CM-NEXT: ALU clause starting at 14:
+; CM-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; CM-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; CM-NEXT: ADD_INT * T1.X, KC0[3].X, PV.X,
+; CM-NEXT: ALU clause starting at 19:
+; CM-NEXT: ADD_INT * T2.X, KC0[2].W, T0.X,
+; CM-NEXT: ADD_INT * T0.X, KC0[2].Z, T0.X,
+; CM-NEXT: ALU clause starting at 21:
+; CM-NEXT: BFE_INT T0.Y, T1.X, 0.0, literal.x,
+; CM-NEXT: BFE_INT T0.Z, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; CM-NEXT: BFE_INT * T0.W, T0.X, 0.0, literal.x, BS:VEC_201
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T0.Z,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.Y,
+; CM-NEXT: BFE_INT * T0.X, PV.W, 0.0, literal.x,
+; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; CM-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+;
+; GCN-LABEL: i8_mad_32:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_mov_b32 s24, SCRATCH_RSRC_DWORD0
+; GCN-NEXT: s_mov_b32 s25, SCRATCH_RSRC_DWORD1
+; GCN-NEXT: s_mov_b32 s26, -1
+; GCN-NEXT: s_mov_b32 s27, 0xe8f000
+; GCN-NEXT: s_add_u32 s24, s24, s11
+; GCN-NEXT: s_addc_u32 s25, s25, 0
+; GCN-NEXT: s_load_dword s8, s[4:5], 0x11
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s9, s8, 4
+; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: buffer_load_dword v1, v1, s[24:27], 0 offen
+; GCN-NEXT: buffer_load_dword v0, v0, s[24:27], 0 offen
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s14, 0
+; GCN-NEXT: s_mov_b32 s15, s11
+; GCN-NEXT: s_mov_b64 s[18:19], s[14:15]
+; GCN-NEXT: s_mov_b64 s[22:23], s[14:15]
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_mov_b64 s[12:13], s[2:3]
+; GCN-NEXT: s_mov_b64 s[16:17], s[4:5]
+; GCN-NEXT: s_mov_b64 s[20:21], s[6:7]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_sbyte v2, v[0:1], s[12:15], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v3, v[0:1], s[16:19], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v0, v[0:1], s[20:23], 0 addr64
+; GCN-NEXT: s_mov_b32 s10, -1
+; GCN-NEXT: s_mov_b32 s8, s0
+; GCN-NEXT: s_mov_b32 s9, s1
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mad_u32_u24 v0, v1, v2, v0
+; GCN-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GCN-NEXT: buffer_store_dword v0, off, s[8:11], 0
+; GCN-NEXT: s_endpgm
+;
+; SI-LABEL: i8_mad_32:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
+; SI-NEXT: s_load_dword s0, s[4:5], 0x44
+; SI-NEXT: s_mov_b32 s89, SCRATCH_RSRC_DWORD1
+; SI-NEXT: s_mov_b32 s90, -1
+; SI-NEXT: s_mov_b32 s91, 0xe80000
+; SI-NEXT: s_add_u32 s88, s88, s11
+; SI-NEXT: s_addc_u32 s89, s89, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_add_i32 s1, s0, 4
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_load_dword v4, v0, s[88:91], 0 offen
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: buffer_load_dword v5, v0, s[88:91], 0 offen
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_mov_b32_e32 v6, s7
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; SI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; SI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; SI-NEXT: flat_load_sbyte v0, v[0:1]
+; SI-NEXT: flat_load_sbyte v1, v[2:3]
+; SI-NEXT: flat_load_sbyte v2, v[4:5]
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mad_u16 v0, v0, v1, v2
+; SI-NEXT: v_bfe_i32 v0, v0, 0, 16
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: i8_mad_32:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; VI-NEXT: s_load_dword s0, s[4:5], 0x44
+; VI-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; VI-NEXT: s_mov_b32 s14, -1
+; VI-NEXT: s_mov_b32 s15, 0xe80000
+; VI-NEXT: s_add_u32 s12, s12, s11
+; VI-NEXT: s_addc_u32 s13, s13, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_i32 s1, s0, 4
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: buffer_load_dword v4, v0, s[12:15], 0 offen
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: buffer_load_dword v5, v0, s[12:15], 0 offen
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_mov_b32_e32 v6, s7
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; VI-NEXT: flat_load_sbyte v0, v[0:1]
+; VI-NEXT: flat_load_sbyte v1, v[2:3]
+; VI-NEXT: flat_load_sbyte v2, v[4:5]
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mad_u16 v0, v0, v1, v2
+; VI-NEXT: v_bfe_i32 v0, v0, 0, 16
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], 0
+; VI-NEXT: s_endpgm
entry:
%retval.0.i = load i64, ptr addrspace(5) %idx
%arrayidx = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %retval.0.i
@@ -215,16 +1125,207 @@ entry:
ret void
}
-; FUNC-LABEL: {{^}}i8_mad_64:
-; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
-; The result must be sign-extended
-; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x
-; EG: 8
-; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
-; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_64(ptr addrspace(1) %out, ptr addrspace(1) %a, ptr addrspace(1) %b, ptr addrspace(1) %c, ptr addrspace(5) %idx) {
+; EG-LABEL: i8_mad_64:
+; EG: ; %bb.0: ; %entry
+; EG-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 0 @8
+; EG-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; EG-NEXT: TEX 1 @10
+; EG-NEXT: ALU 11, @21, KC0[CB0:0-32], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
+; EG-NEXT: CF_END
+; EG-NEXT: PAD
+; EG-NEXT: Fetch clause starting at 8:
+; EG-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; EG-NEXT: Fetch clause starting at 10:
+; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; EG-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; EG-NEXT: ALU clause starting at 14:
+; EG-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; EG-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; EG-NEXT: ADD_INT * T1.X, KC0[2].W, PV.X,
+; EG-NEXT: ALU clause starting at 19:
+; EG-NEXT: ADD_INT T2.X, KC0[2].Z, T0.X,
+; EG-NEXT: ADD_INT * T0.X, KC0[3].X, T0.X,
+; EG-NEXT: ALU clause starting at 21:
+; EG-NEXT: BFE_INT T0.Z, T1.X, 0.0, literal.x,
+; EG-NEXT: BFE_INT * T0.W, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: BFE_INT T1.W, T0.X, 0.0, literal.x,
+; EG-NEXT: MULLO_INT * T0.X, PV.W, PV.Z,
+; EG-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; EG-NEXT: ADD_INT * T0.W, PS, PV.W,
+; EG-NEXT: BFE_INT T0.X, PV.W, 0.0, literal.x,
+; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
+; EG-NEXT: 16(2.242078e-44), 2(2.802597e-45)
+; EG-NEXT: ASHR * T0.Y, PV.X, literal.x,
+; EG-NEXT: 31(4.344025e-44), 0(0.000000e+00)
+;
+; CM-LABEL: i8_mad_64:
+; CM: ; %bb.0: ; %entry
+; CM-NEXT: ALU 4, @14, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 0 @8
+; CM-NEXT: ALU 1, @19, KC0[CB0:0-32], KC1[]
+; CM-NEXT: TEX 1 @10
+; CM-NEXT: ALU 13, @21, KC0[CB0:0-32], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
+; CM-NEXT: CF_END
+; CM-NEXT: PAD
+; CM-NEXT: Fetch clause starting at 8:
+; CM-NEXT: VTX_READ_8 T1.X, T1.X, 0, #1
+; CM-NEXT: Fetch clause starting at 10:
+; CM-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
+; CM-NEXT: VTX_READ_8 T2.X, T2.X, 0, #1
+; CM-NEXT: ALU clause starting at 14:
+; CM-NEXT: LSHR * T0.W, KC0[3].Y, literal.x,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: MOVA_INT * AR.x (MASKED), PV.W,
+; CM-NEXT: MOV * T0.X, T(0 + AR.x).X+,
+; CM-NEXT: ADD_INT * T1.X, KC0[3].X, PV.X,
+; CM-NEXT: ALU clause starting at 19:
+; CM-NEXT: ADD_INT * T2.X, KC0[2].W, T0.X,
+; CM-NEXT: ADD_INT * T0.X, KC0[2].Z, T0.X,
+; CM-NEXT: ALU clause starting at 21:
+; CM-NEXT: BFE_INT T0.Y, T1.X, 0.0, literal.x,
+; CM-NEXT: BFE_INT T0.Z, T2.X, 0.0, literal.x, BS:VEC_120/SCL_212
+; CM-NEXT: BFE_INT * T0.W, T0.X, 0.0, literal.x, BS:VEC_201
+; CM-NEXT: 8(1.121039e-44), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T0.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T0.Z,
+; CM-NEXT: ADD_INT * T0.W, PV.X, T0.Y,
+; CM-NEXT: BFE_INT * T0.X, PV.W, 0.0, literal.x,
+; CM-NEXT: 16(2.242078e-44), 0(0.000000e+00)
+; CM-NEXT: LSHR T1.X, KC0[2].Y, literal.x,
+; CM-NEXT: ASHR * T0.Y, PV.X, literal.y,
+; CM-NEXT: 2(2.802597e-45), 31(4.344025e-44)
+;
+; GCN-LABEL: i8_mad_64:
+; GCN: ; %bb.0: ; %entry
+; GCN-NEXT: s_mov_b32 s24, SCRATCH_RSRC_DWORD0
+; GCN-NEXT: s_mov_b32 s25, SCRATCH_RSRC_DWORD1
+; GCN-NEXT: s_mov_b32 s26, -1
+; GCN-NEXT: s_mov_b32 s27, 0xe8f000
+; GCN-NEXT: s_add_u32 s24, s24, s11
+; GCN-NEXT: s_addc_u32 s25, s25, 0
+; GCN-NEXT: s_load_dword s8, s[4:5], 0x11
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_add_i32 s9, s8, 4
+; GCN-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
+; GCN-NEXT: v_mov_b32_e32 v0, s8
+; GCN-NEXT: v_mov_b32_e32 v1, s9
+; GCN-NEXT: buffer_load_dword v1, v1, s[24:27], 0 offen
+; GCN-NEXT: buffer_load_dword v0, v0, s[24:27], 0 offen
+; GCN-NEXT: s_mov_b32 s11, 0xf000
+; GCN-NEXT: s_mov_b32 s14, 0
+; GCN-NEXT: s_mov_b32 s15, s11
+; GCN-NEXT: s_mov_b64 s[18:19], s[14:15]
+; GCN-NEXT: s_mov_b64 s[22:23], s[14:15]
+; GCN-NEXT: s_waitcnt lgkmcnt(0)
+; GCN-NEXT: s_mov_b64 s[12:13], s[2:3]
+; GCN-NEXT: s_mov_b64 s[16:17], s[4:5]
+; GCN-NEXT: s_mov_b64 s[20:21], s[6:7]
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: buffer_load_sbyte v2, v[0:1], s[12:15], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v3, v[0:1], s[16:19], 0 addr64
+; GCN-NEXT: buffer_load_sbyte v0, v[0:1], s[20:23], 0 addr64
+; GCN-NEXT: s_mov_b32 s10, -1
+; GCN-NEXT: s_mov_b32 s8, s0
+; GCN-NEXT: s_mov_b32 s9, s1
+; GCN-NEXT: s_waitcnt vmcnt(2)
+; GCN-NEXT: v_and_b32_e32 v1, 0xffff, v2
+; GCN-NEXT: s_waitcnt vmcnt(1)
+; GCN-NEXT: v_and_b32_e32 v2, 0xffff, v3
+; GCN-NEXT: s_waitcnt vmcnt(0)
+; GCN-NEXT: v_mad_u32_u24 v0, v1, v2, v0
+; GCN-NEXT: v_bfe_i32 v0, v0, 0, 16
+; GCN-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GCN-NEXT: buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; GCN-NEXT: s_endpgm
+;
+; SI-LABEL: i8_mad_64:
+; SI: ; %bb.0: ; %entry
+; SI-NEXT: s_mov_b32 s88, SCRATCH_RSRC_DWORD0
+; SI-NEXT: s_load_dword s0, s[4:5], 0x44
+; SI-NEXT: s_mov_b32 s89, SCRATCH_RSRC_DWORD1
+; SI-NEXT: s_mov_b32 s90, -1
+; SI-NEXT: s_mov_b32 s91, 0xe80000
+; SI-NEXT: s_add_u32 s88, s88, s11
+; SI-NEXT: s_addc_u32 s89, s89, 0
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: s_add_i32 s1, s0, 4
+; SI-NEXT: v_mov_b32_e32 v0, s0
+; SI-NEXT: buffer_load_dword v4, v0, s[88:91], 0 offen
+; SI-NEXT: v_mov_b32_e32 v0, s1
+; SI-NEXT: buffer_load_dword v5, v0, s[88:91], 0 offen
+; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; SI-NEXT: s_waitcnt lgkmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v1, s3
+; SI-NEXT: v_mov_b32_e32 v3, s5
+; SI-NEXT: v_mov_b32_e32 v6, s7
+; SI-NEXT: s_mov_b32 s3, 0xf000
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; SI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; SI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; SI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; SI-NEXT: v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; SI-NEXT: flat_load_sbyte v0, v[0:1]
+; SI-NEXT: flat_load_sbyte v1, v[2:3]
+; SI-NEXT: flat_load_sbyte v2, v[4:5]
+; SI-NEXT: s_mov_b32 s2, -1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mad_u16 v0, v0, v1, v2
+; SI-NEXT: v_bfe_i32 v0, v0, 0, 16
+; SI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT: s_endpgm
+;
+; VI-LABEL: i8_mad_64:
+; VI: ; %bb.0: ; %entry
+; VI-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; VI-NEXT: s_load_dword s0, s[4:5], 0x44
+; VI-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; VI-NEXT: s_mov_b32 s14, -1
+; VI-NEXT: s_mov_b32 s15, 0xe80000
+; VI-NEXT: s_add_u32 s12, s12, s11
+; VI-NEXT: s_addc_u32 s13, s13, 0
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: s_add_i32 s1, s0, 4
+; VI-NEXT: v_mov_b32_e32 v0, s0
+; VI-NEXT: buffer_load_dword v4, v0, s[12:15], 0 offen
+; VI-NEXT: v_mov_b32_e32 v0, s1
+; VI-NEXT: buffer_load_dword v5, v0, s[12:15], 0 offen
+; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
+; VI-NEXT: s_waitcnt lgkmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v1, s3
+; VI-NEXT: v_mov_b32_e32 v3, s5
+; VI-NEXT: v_mov_b32_e32 v6, s7
+; VI-NEXT: s_mov_b32 s3, 0xf000
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, s2, v4
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; VI-NEXT: v_add_u32_e32 v2, vcc, s4, v4
+; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; VI-NEXT: v_add_u32_e32 v4, vcc, s6, v4
+; VI-NEXT: v_addc_u32_e32 v5, vcc, v6, v5, vcc
+; VI-NEXT: flat_load_sbyte v0, v[0:1]
+; VI-NEXT: flat_load_sbyte v1, v[2:3]
+; VI-NEXT: flat_load_sbyte v2, v[4:5]
+; VI-NEXT: s_mov_b32 s2, -1
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mad_u16 v0, v0, v1, v2
+; VI-NEXT: v_bfe_i32 v0, v0, 0, 16
+; VI-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
+; VI-NEXT: s_endpgm
entry:
%retval.0.i = load i64, ptr addrspace(5) %idx
%arrayidx = getelementptr inbounds i8, ptr addrspace(1) %a, i64 %retval.0.i
@@ -248,17 +1349,236 @@ entry:
; had a chance to form mul24. The mul combine would then see
; extractelement with no known bits and fail. All of the mul/add
; combos in this loop should form v_mad_u32_u24.
-
-; FUNC-LABEL: {{^}}mad24_known_bits_destroyed:
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
-; GCN: v_mad_u32_u24
define void @mad24_known_bits_destroyed(i32 %arg, <4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3, i32 %arg4, i32 %arg5, i32 %arg6, ptr addrspace(1) %arg7, ptr addrspace(1) %arg8) #0 {
+; EG-LABEL: mad24_known_bits_destroyed:
+; EG: ; %bb.0: ; %bb
+; EG-NEXT: ALU 21, @12, KC0[CB0:0-32], KC1[]
+; EG-NEXT: LOOP_START_DX10 @11
+; EG-NEXT: ALU 8, @34, KC0[], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T2.X, 0
+; EG-NEXT: ALU 14, @43, KC0[], KC1[]
+; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 0
+; EG-NEXT: ALU_PUSH_BEFORE 3, @58, KC0[], KC1[]
+; EG-NEXT: JUMP @10 POP:1
+; EG-NEXT: LOOP_BREAK @10
+; EG-NEXT: POP @10 POP:1
+; EG-NEXT: END_LOOP @2
+; EG-NEXT: CF_END
+; EG-NEXT: ALU clause starting at 12:
+; EG-NEXT: MOV * T0.W, KC0[5].X,
+; EG-NEXT: MOV * T0.Z, KC0[4].W,
+; EG-NEXT: MOV * T0.Y, KC0[4].Z,
+; EG-NEXT: MOV T0.X, KC0[2].Y,
+; EG-NEXT: AND_INT * T1.Y, KC0[4].X, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: AND_INT T1.Z, KC0[3].W, literal.x,
+; EG-NEXT: AND_INT T1.W, KC0[3].Z, literal.x,
+; EG-NEXT: MOV * T2.W, KC0[7].Y,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: LSHR T1.X, PS, literal.x,
+; EG-NEXT: AND_INT T2.Y, KC0[6].Y, literal.y,
+; EG-NEXT: MOV T2.Z, KC0[6].X,
+; EG-NEXT: MOV * T2.W, KC0[5].W,
+; EG-NEXT: 2(2.802597e-45), 16777215(2.350989e-38)
+; EG-NEXT: MOV * T3.W, KC0[7].X,
+; EG-NEXT: LSHR T2.X, PV.W, literal.x,
+; EG-NEXT: MOV T3.Y, KC0[5].Z,
+; EG-NEXT: MOV T3.Z, KC0[6].Z,
+; EG-NEXT: MOV * T3.W, KC0[6].W,
+; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; EG-NEXT: MOV * T4.W, KC0[4].Y,
+; EG-NEXT: ALU clause starting at 34:
+; EG-NEXT: MULLO_INT * T0.X, T0.X, T2.Y,
+; EG-NEXT: ADD_INT * T4.W, PS, T3.Z,
+; EG-NEXT: AND_INT * T4.W, PV.W, literal.x,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: MULLO_INT * T0.X, PV.W, T2.Y,
+; EG-NEXT: MULLO_INT * T0.W, T0.W, T1.Y,
+; EG-NEXT: MULLO_INT * T0.Z, T0.Z, T1.Z,
+; EG-NEXT: MULLO_INT * T0.Y, T0.Y, T1.W,
+; EG-NEXT: ADD_INT * T0.X, T0.X, T3.Z,
+; EG-NEXT: ALU clause starting at 43:
+; EG-NEXT: ADD_INT * T4.W, T0.Y, T3.Y,
+; EG-NEXT: AND_INT T4.W, PV.W, literal.x,
+; EG-NEXT: ADD_INT * T5.W, T0.Z, T2.W,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: AND_INT T0.Z, PS, literal.x,
+; EG-NEXT: ADD_INT T0.W, T0.W, T2.Z,
+; EG-NEXT: MULLO_INT * T0.Y, PV.W, T1.W,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T0.Y, PS, T3.Y,
+; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
+; EG-NEXT: MULLO_INT * T0.Z, PV.Z, T1.Z,
+; EG-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; EG-NEXT: ADD_INT T0.Z, PS, T2.W,
+; EG-NEXT: MULLO_INT * T0.W, PV.W, T1.Y,
+; EG-NEXT: ADD_INT * T0.W, PS, T2.Z,
+; EG-NEXT: ALU clause starting at 58:
+; EG-NEXT: ADD_INT * T3.W, T3.W, literal.x,
+; EG-NEXT: -1(nan), 0(0.000000e+00)
+; EG-NEXT: SETE_INT * T4.W, PV.W, 0.0,
+; EG-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+;
+; CM-LABEL: mad24_known_bits_destroyed:
+; CM: ; %bb.0: ; %bb
+; CM-NEXT: ALU 22, @12, KC0[CB0:0-32], KC1[]
+; CM-NEXT: LOOP_START_DX10 @11
+; CM-NEXT: ALU 23, @35, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0.X, T2.X
+; CM-NEXT: ALU 23, @59, KC0[], KC1[]
+; CM-NEXT: MEM_RAT_CACHELESS STORE_DWORD T0, T1.X
+; CM-NEXT: ALU_PUSH_BEFORE 3, @83, KC0[], KC1[]
+; CM-NEXT: JUMP @10 POP:1
+; CM-NEXT: LOOP_BREAK @10
+; CM-NEXT: POP @10 POP:1
+; CM-NEXT: END_LOOP @2
+; CM-NEXT: CF_END
+; CM-NEXT: ALU clause starting at 12:
+; CM-NEXT: MOV * T0.W, KC0[5].X,
+; CM-NEXT: MOV * T0.Z, KC0[4].W,
+; CM-NEXT: MOV * T0.Y, KC0[4].Z,
+; CM-NEXT: MOV T0.X, KC0[2].Y,
+; CM-NEXT: AND_INT * T1.Y, KC0[4].X, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: AND_INT T1.Z, KC0[3].W, literal.x,
+; CM-NEXT: AND_INT * T1.W, KC0[3].Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: AND_INT T2.Y, KC0[6].Y, literal.x,
+; CM-NEXT: MOV T2.Z, KC0[6].X,
+; CM-NEXT: MOV * T2.W, KC0[7].Y,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: LSHR T1.X, PV.W, literal.x,
+; CM-NEXT: MOV T3.Y, KC0[5].W,
+; CM-NEXT: MOV T3.Z, KC0[5].Z,
+; CM-NEXT: MOV * T2.W, KC0[7].X,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: LSHR T2.X, PV.W, literal.x,
+; CM-NEXT: MOV T4.Y, KC0[6].Z,
+; CM-NEXT: MOV T4.Z, KC0[6].W,
+; CM-NEXT: MOV * T2.W, KC0[4].Y,
+; CM-NEXT: 2(2.802597e-45), 0(0.000000e+00)
+; CM-NEXT: ALU clause starting at 35:
+; CM-NEXT: MULLO_INT T0.X, T0.X, T2.Y,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.X, T2.Y,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.X, T2.Y,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.X, T2.Y,
+; CM-NEXT: ADD_INT * T2.W, PV.X, T4.Y,
+; CM-NEXT: AND_INT * T2.W, PV.W, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X, T2.W, T2.Y,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T2.W, T2.Y,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T2.W, T2.Y,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T2.W, T2.Y,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT * T0.W, T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.Z, T1.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.Z, T1.Z,
+; CM-NEXT: MULLO_INT T0.Z, T0.Z, T1.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.Z, T1.Z,
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.Y, T1.W,
+; CM-NEXT: MULLO_INT T0.Y, T0.Y, T1.W,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.Y, T1.W,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.Y, T1.W,
+; CM-NEXT: ADD_INT * T0.X, T0.X, T4.Y,
+; CM-NEXT: ALU clause starting at 59:
+; CM-NEXT: ADD_INT * T2.W, T0.Y, T3.Z,
+; CM-NEXT: ADD_INT T0.Z, T0.Z, T3.Y,
+; CM-NEXT: AND_INT * T2.W, PV.W, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT T0.Y, T2.W, T1.W,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T2.W, T1.W,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T2.W, T1.W,
+; CM-NEXT: ADD_INT T0.Y, PV.Y, T3.Z,
+; CM-NEXT: ADD_INT T5.Z, T0.W, T2.Z, BS:VEC_021/SCL_122
+; CM-NEXT: AND_INT * T0.W, T0.Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.W, T1.Z,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T1.Z,
+; CM-NEXT: MULLO_INT T0.Z, T0.W, T1.Z,
+; CM-NEXT: MULLO_INT * T0.W (MASKED), T0.W, T1.Z,
+; CM-NEXT: ADD_INT T0.Z, PV.Z, T3.Y,
+; CM-NEXT: AND_INT * T0.W, T5.Z, literal.x,
+; CM-NEXT: 16777215(2.350989e-38), 0(0.000000e+00)
+; CM-NEXT: MULLO_INT T0.X (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.Y (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT T0.Z (MASKED), T0.W, T1.Y,
+; CM-NEXT: MULLO_INT * T0.W, T0.W, T1.Y,
+; CM-NEXT: ADD_INT * T0.W, PV.W, T2.Z,
+; CM-NEXT: ALU clause starting at 83:
+; CM-NEXT: ADD_INT * T4.Z, T4.Z, literal.x,
+; CM-NEXT: -1(nan), 0(0.000000e+00)
+; CM-NEXT: SETE_INT * T2.W, PV.Z, 0.0,
+; CM-NEXT: PRED_SETNE_INT * ExecMask,PredicateBit (MASKED), PV.W, 0.0,
+;
+; GCN-LABEL: mad24_known_bits_destroyed:
+; GCN: ; %bb.0: ; %bb
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GCN-NEXT: v_mov_b32_e32 v5, v0
+; GCN-NEXT: v_and_b32_e32 v0, 0xffffff, v13
+; GCN-NEXT: v_and_b32_e32 v1, 0xffffff, v2
+; GCN-NEXT: v_and_b32_e32 v2, 0xffffff, v3
+; GCN-NEXT: v_and_b32_e32 v3, 0xffffff, v4
+; GCN-NEXT: s_mov_b64 s[8:9], 0
+; GCN-NEXT: s_mov_b32 s6, 0
+; GCN-NEXT: s_mov_b32 s7, 0xf000
+; GCN-NEXT: s_mov_b32 s4, s6
+; GCN-NEXT: s_mov_b32 s5, s6
+; GCN-NEXT: .LBB9_1: ; %bb19
+; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: v_mad_u32_u24 v4, v5, v0, v14
+; GCN-NEXT: s_waitcnt expcnt(0)
+; GCN-NEXT: v_mad_u32_u24 v6, v6, v1, v10
+; GCN-NEXT: v_mad_u32_u24 v7, v7, v2, v11
+; GCN-NEXT: v_mad_u32_u24 v8, v8, v3, v12
+; GCN-NEXT: v_add_i32_e32 v15, vcc, -1, v15
+; GCN-NEXT: v_mad_u32_u24 v5, v4, v0, v14
+; GCN-NEXT: v_mad_u32_u24 v6, v6, v1, v10
+; GCN-NEXT: v_mad_u32_u24 v7, v7, v2, v11
+; GCN-NEXT: v_mad_u32_u24 v8, v8, v3, v12
+; GCN-NEXT: v_cmp_eq_u32_e32 vcc, 0, v15
+; GCN-NEXT: buffer_store_dword v5, v[16:17], s[4:7], 0 addr64
+; GCN-NEXT: s_or_b64 s[8:9], vcc, s[8:9]
+; GCN-NEXT: buffer_store_dwordx4 v[5:8], v[18:19], s[4:7], 0 addr64
+; GCN-NEXT: s_andn2_b64 exec, exec, s[8:9]
+; GCN-NEXT: s_cbranch_execnz .LBB9_1
+; GCN-NEXT: ; %bb.2: ; %bb18
+; GCN-NEXT: s_or_b64 exec, exec, s[8:9]
+; GCN-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; GCN-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: mad24_known_bits_destroyed:
+; GFX8: ; %bb.0: ; %bb
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v5, v0
+; GFX8-NEXT: v_and_b32_e32 v0, 0xffffff, v13
+; GFX8-NEXT: v_and_b32_e32 v1, 0xffffff, v2
+; GFX8-NEXT: v_and_b32_e32 v2, 0xffffff, v3
+; GFX8-NEXT: v_and_b32_e32 v3, 0xffffff, v4
+; GFX8-NEXT: s_mov_b64 s[4:5], 0
+; GFX8-NEXT: .LBB9_1: ; %bb19
+; GFX8-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX8-NEXT: v_add_u32_e32 v15, vcc, -1, v15
+; GFX8-NEXT: v_mad_u32_u24 v4, v5, v0, v14
+; GFX8-NEXT: v_mad_u32_u24 v6, v6, v1, v10
+; GFX8-NEXT: v_mad_u32_u24 v7, v7, v2, v11
+; GFX8-NEXT: v_mad_u32_u24 v8, v8, v3, v12
+; GFX8-NEXT: v_cmp_eq_u32_e32 vcc, 0, v15
+; GFX8-NEXT: v_mad_u32_u24 v5, v4, v0, v14
+; GFX8-NEXT: v_mad_u32_u24 v6, v6, v1, v10
+; GFX8-NEXT: v_mad_u32_u24 v7, v7, v2, v11
+; GFX8-NEXT: v_mad_u32_u24 v8, v8, v3, v12
+; GFX8-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX8-NEXT: flat_store_dword v[16:17], v5
+; GFX8-NEXT: flat_store_dwordx4 v[18:19], v[5:8]
+; GFX8-NEXT: s_andn2_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_cbranch_execnz .LBB9_1
+; GFX8-NEXT: ; %bb.2: ; %bb18
+; GFX8-NEXT: s_or_b64 exec, exec, s[4:5]
+; GFX8-NEXT: s_waitcnt vmcnt(0)
+; GFX8-NEXT: s_setpc_b64 s[30:31]
bb:
%tmp = and i32 %arg4, 16777215
%tmp9 = extractelement <4 x i32> %arg1, i64 1