[llvm] 1bb07e1 - [AMDGPU] Precommit tests for D84518 Propagate fast math flags in frem lowering

Jay Foad via llvm-commits llvm-commits at lists.llvm.org
Wed Aug 5 01:10:27 PDT 2020


Author: Jay Foad
Date: 2020-08-05T09:09:02+01:00
New Revision: 1bb07e1b91c187d868bfe383175c2ce04ebed8b8

URL: https://github.com/llvm/llvm-project/commit/1bb07e1b91c187d868bfe383175c2ce04ebed8b8
DIFF: https://github.com/llvm/llvm-project/commit/1bb07e1b91c187d868bfe383175c2ce04ebed8b8.diff

LOG: [AMDGPU] Precommit tests for D84518 Propagate fast math flags in frem lowering
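
For context, frem has no direct hardware instruction on AMDGPU, so the backend expands it during lowering into a divide, a truncate and a multiply-subtract. The sketch below illustrates that expansion at the IR level; it is an illustration only (the function name is made up for this note), not the exact DAG the backend builds:

define half @frem_expansion_sketch(half %x, half %y) {
  ; frem x, y  ==>  x - trunc(x / y) * y
  ; The fdiv is what later expands into the v_div_scale / v_rcp_f32 /
  ; v_div_fmas sequence visible in the checks below; D84518 is expected
  ; to copy the frem's fast-math flags onto these expanded operations.
  %q = fdiv half %x, %y
  %t = call half @llvm.trunc.f16(half %q)
  %m = fmul half %t, %y
  %r = fsub half %x, %m
  ret half %r
}
declare half @llvm.trunc.f16(half)

The new fast_frem_f16, fast_frem_f32 and fast_frem_f64 kernels below carry the fast flag on the frem itself; the checks recorded here capture the current, pre-D84518 output as a baseline.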

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/frem.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AMDGPU/frem.ll b/llvm/test/CodeGen/AMDGPU/frem.ll
index 09c9716024c2..0fc9291ac287 100644
--- a/llvm/test/CodeGen/AMDGPU/frem.ll
+++ b/llvm/test/CodeGen/AMDGPU/frem.ll
@@ -122,6 +122,125 @@ define amdgpu_kernel void @frem_f16(half addrspace(1)* %out, half addrspace(1)*
    ret void
 }
 
+define amdgpu_kernel void @fast_frem_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
+; SI-LABEL: fast_frem_f16:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s6, s2
+; SI-NEXT:    s_mov_b32 s7, s3
+; SI-NEXT:    s_mov_b32 s10, s2
+; SI-NEXT:    s_mov_b32 s11, s3
+; SI-NEXT:    buffer_load_ushort v0, off, s[4:7], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; SI-NEXT:    buffer_load_ushort v1, off, s[8:11], 0 offset:8
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT:    v_div_scale_f32 v2, vcc, v0, v1, v0
+; SI-NEXT:    v_div_scale_f32 v3, s[4:5], v1, v1, v0
+; SI-NEXT:    v_rcp_f32_e32 v4, v3
+; SI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
+; SI-NEXT:    v_fma_f32 v5, -v3, v4, 1.0
+; SI-NEXT:    v_fma_f32 v4, v5, v4, v4
+; SI-NEXT:    v_mul_f32_e32 v5, v2, v4
+; SI-NEXT:    v_fma_f32 v6, -v3, v5, v2
+; SI-NEXT:    v_fma_f32 v5, v6, v4, v5
+; SI-NEXT:    v_fma_f32 v2, -v3, v5, v2
+; SI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
+; SI-NEXT:    v_div_fmas_f32 v2, v2, v4, v5
+; SI-NEXT:    v_div_fixup_f32 v2, v2, v1, v0
+; SI-NEXT:    v_trunc_f32_e32 v2, v2
+; SI-NEXT:    v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
+; SI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT:    buffer_store_short v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; CI-LABEL: fast_frem_f16:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; CI-NEXT:    s_mov_b32 s11, 0xf000
+; CI-NEXT:    s_mov_b32 s10, -1
+; CI-NEXT:    s_mov_b32 s2, s10
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b32 s8, s4
+; CI-NEXT:    s_mov_b32 s9, s5
+; CI-NEXT:    s_mov_b32 s4, s6
+; CI-NEXT:    s_mov_b32 s5, s7
+; CI-NEXT:    s_mov_b32 s3, s11
+; CI-NEXT:    s_mov_b32 s6, s10
+; CI-NEXT:    s_mov_b32 s7, s11
+; CI-NEXT:    buffer_load_ushort v0, off, s[4:7], 0
+; CI-NEXT:    buffer_load_ushort v1, off, s[0:3], 0 offset:8
+; CI-NEXT:    s_waitcnt vmcnt(1)
+; CI-NEXT:    v_cvt_f32_f16_e32 v0, v0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_cvt_f32_f16_e32 v1, v1
+; CI-NEXT:    v_div_scale_f32 v3, s[0:1], v1, v1, v0
+; CI-NEXT:    v_div_scale_f32 v2, vcc, v0, v1, v0
+; CI-NEXT:    v_rcp_f32_e32 v4, v3
+; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
+; CI-NEXT:    v_fma_f32 v5, -v3, v4, 1.0
+; CI-NEXT:    v_fma_f32 v4, v5, v4, v4
+; CI-NEXT:    v_mul_f32_e32 v5, v2, v4
+; CI-NEXT:    v_fma_f32 v6, -v3, v5, v2
+; CI-NEXT:    v_fma_f32 v5, v6, v4, v5
+; CI-NEXT:    v_fma_f32 v2, -v3, v5, v2
+; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
+; CI-NEXT:    v_div_fmas_f32 v2, v2, v4, v5
+; CI-NEXT:    v_div_fixup_f32 v2, v2, v1, v0
+; CI-NEXT:    v_trunc_f32_e32 v2, v2
+; CI-NEXT:    v_fma_f32 v0, -v2, v1, v0
+; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 0
+; CI-NEXT:    v_cvt_f16_f32_e32 v0, v0
+; CI-NEXT:    buffer_store_short v0, off, s[8:11], 0
+; CI-NEXT:    s_endpgm
+;
+; VI-LABEL: fast_frem_f16:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_add_u32 s0, s0, 8
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    flat_load_ushort v4, v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_load_ushort v2, v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_waitcnt vmcnt(1) lgkmcnt(1)
+; VI-NEXT:    v_cvt_f32_f16_e32 v3, v4
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_cvt_f32_f16_e32 v5, v2
+; VI-NEXT:    v_rcp_f32_e32 v5, v5
+; VI-NEXT:    v_mul_f32_e32 v3, v3, v5
+; VI-NEXT:    v_cvt_f16_f32_e32 v3, v3
+; VI-NEXT:    v_div_fixup_f16 v3, v3, v2, v4
+; VI-NEXT:    v_trunc_f16_e32 v3, v3
+; VI-NEXT:    v_fma_f16 v2, -v3, v2, v4
+; VI-NEXT:    flat_store_short v[0:1], v2
+; VI-NEXT:    s_endpgm
+                      half addrspace(1)* %in2) #0 {
+   %gep2 = getelementptr half, half addrspace(1)* %in2, i32 4
+   %r0 = load half, half addrspace(1)* %in1, align 4
+   %r1 = load half, half addrspace(1)* %gep2, align 4
+   %r2 = frem fast half %r0, %r1
+   store half %r2, half addrspace(1)* %out, align 4
+   ret void
+}
+
 define amdgpu_kernel void @unsafe_frem_f16(half addrspace(1)* %out, half addrspace(1)* %in1,
 ; SI-LABEL: unsafe_frem_f16:
 ; SI:       ; %bb.0:
@@ -327,6 +446,121 @@ define amdgpu_kernel void @frem_f32(float addrspace(1)* %out, float addrspace(1)
    ret void
 }
 
+define amdgpu_kernel void @fast_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
+; SI-LABEL: fast_frem_f32:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[8:9], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s3, 0xf000
+; SI-NEXT:    s_mov_b32 s2, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s0, s4
+; SI-NEXT:    s_mov_b32 s1, s5
+; SI-NEXT:    s_mov_b32 s4, s6
+; SI-NEXT:    s_mov_b32 s5, s7
+; SI-NEXT:    s_mov_b32 s6, s2
+; SI-NEXT:    s_mov_b32 s7, s3
+; SI-NEXT:    s_mov_b32 s10, s2
+; SI-NEXT:    s_mov_b32 s11, s3
+; SI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; SI-NEXT:    buffer_load_dword v1, off, s[8:11], 0 offset:16
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_div_scale_f32 v2, vcc, v0, v1, v0
+; SI-NEXT:    v_div_scale_f32 v3, s[4:5], v1, v1, v0
+; SI-NEXT:    v_rcp_f32_e32 v4, v3
+; SI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
+; SI-NEXT:    v_fma_f32 v5, -v3, v4, 1.0
+; SI-NEXT:    v_fma_f32 v4, v5, v4, v4
+; SI-NEXT:    v_mul_f32_e32 v5, v2, v4
+; SI-NEXT:    v_fma_f32 v6, -v3, v5, v2
+; SI-NEXT:    v_fma_f32 v5, v6, v4, v5
+; SI-NEXT:    v_fma_f32 v2, -v3, v5, v2
+; SI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
+; SI-NEXT:    v_div_fmas_f32 v2, v2, v4, v5
+; SI-NEXT:    v_div_fixup_f32 v2, v2, v1, v0
+; SI-NEXT:    v_trunc_f32_e32 v2, v2
+; SI-NEXT:    v_fma_f32 v0, -v2, v1, v0
+; SI-NEXT:    buffer_store_dword v0, off, s[0:3], 0
+; SI-NEXT:    s_endpgm
+;
+; CI-LABEL: fast_frem_f32:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; CI-NEXT:    s_mov_b32 s11, 0xf000
+; CI-NEXT:    s_mov_b32 s10, -1
+; CI-NEXT:    s_mov_b32 s2, s10
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b32 s8, s4
+; CI-NEXT:    s_mov_b32 s9, s5
+; CI-NEXT:    s_mov_b32 s4, s6
+; CI-NEXT:    s_mov_b32 s5, s7
+; CI-NEXT:    s_mov_b32 s6, s10
+; CI-NEXT:    s_mov_b32 s7, s11
+; CI-NEXT:    s_mov_b32 s3, s11
+; CI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
+; CI-NEXT:    buffer_load_dword v1, off, s[0:3], 0 offset:16
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_div_scale_f32 v3, s[0:1], v1, v1, v0
+; CI-NEXT:    v_div_scale_f32 v2, vcc, v0, v1, v0
+; CI-NEXT:    v_rcp_f32_e32 v4, v3
+; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
+; CI-NEXT:    v_fma_f32 v5, -v3, v4, 1.0
+; CI-NEXT:    v_fma_f32 v4, v5, v4, v4
+; CI-NEXT:    v_mul_f32_e32 v5, v2, v4
+; CI-NEXT:    v_fma_f32 v6, -v3, v5, v2
+; CI-NEXT:    v_fma_f32 v5, v6, v4, v5
+; CI-NEXT:    v_fma_f32 v2, -v3, v5, v2
+; CI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
+; CI-NEXT:    v_div_fmas_f32 v2, v2, v4, v5
+; CI-NEXT:    v_div_fixup_f32 v2, v2, v1, v0
+; CI-NEXT:    v_trunc_f32_e32 v2, v2
+; CI-NEXT:    v_fma_f32 v0, -v2, v1, v0
+; CI-NEXT:    buffer_store_dword v0, off, s[8:11], 0
+; CI-NEXT:    s_endpgm
+;
+; VI-LABEL: fast_frem_f32:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    s_add_u32 s0, s0, 16
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    s_addc_u32 s1, s1, 0
+; VI-NEXT:    flat_load_dword v4, v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v3, s1
+; VI-NEXT:    v_mov_b32_e32 v2, s0
+; VI-NEXT:    flat_load_dword v2, v[2:3]
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_div_scale_f32 v5, s[0:1], v2, v2, v4
+; VI-NEXT:    v_div_scale_f32 v3, vcc, v4, v2, v4
+; VI-NEXT:    v_rcp_f32_e32 v6, v5
+; VI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 3
+; VI-NEXT:    v_fma_f32 v7, -v5, v6, 1.0
+; VI-NEXT:    v_fma_f32 v6, v7, v6, v6
+; VI-NEXT:    v_mul_f32_e32 v7, v3, v6
+; VI-NEXT:    v_fma_f32 v8, -v5, v7, v3
+; VI-NEXT:    v_fma_f32 v7, v8, v6, v7
+; VI-NEXT:    v_fma_f32 v3, -v5, v7, v3
+; VI-NEXT:    s_setreg_imm32_b32 hwreg(HW_REG_MODE, 4, 2), 0
+; VI-NEXT:    v_div_fmas_f32 v3, v3, v6, v7
+; VI-NEXT:    v_div_fixup_f32 v3, v3, v2, v4
+; VI-NEXT:    v_trunc_f32_e32 v3, v3
+; VI-NEXT:    v_fma_f32 v2, -v3, v2, v4
+; VI-NEXT:    flat_store_dword v[0:1], v2
+; VI-NEXT:    s_endpgm
+                      float addrspace(1)* %in2) #0 {
+   %gep2 = getelementptr float, float addrspace(1)* %in2, i32 4
+   %r0 = load float, float addrspace(1)* %in1, align 4
+   %r1 = load float, float addrspace(1)* %gep2, align 4
+   %r2 = frem fast float %r0, %r1
+   store float %r2, float addrspace(1)* %out, align 4
+   ret void
+}
+
 define amdgpu_kernel void @unsafe_frem_f32(float addrspace(1)* %out, float addrspace(1)* %in1,
 ; SI-LABEL: unsafe_frem_f32:
 ; SI:       ; %bb.0:
@@ -537,6 +771,134 @@ define amdgpu_kernel void @frem_f64(double addrspace(1)* %out, double addrspace(
    ret void
 }
 
+define amdgpu_kernel void @fast_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
+; SI-LABEL: fast_frem_f64:
+; SI:       ; %bb.0:
+; SI-NEXT:    s_load_dwordx4 s[8:11], s[0:1], 0x9
+; SI-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0xd
+; SI-NEXT:    s_mov_b32 s7, 0xf000
+; SI-NEXT:    s_mov_b32 s6, -1
+; SI-NEXT:    s_waitcnt lgkmcnt(0)
+; SI-NEXT:    s_mov_b32 s4, s8
+; SI-NEXT:    s_mov_b32 s5, s9
+; SI-NEXT:    s_mov_b32 s0, s10
+; SI-NEXT:    s_mov_b32 s1, s11
+; SI-NEXT:    s_mov_b32 s2, s6
+; SI-NEXT:    s_mov_b32 s3, s7
+; SI-NEXT:    s_mov_b32 s14, s6
+; SI-NEXT:    s_mov_b32 s15, s7
+; SI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[0:3], 0
+; SI-NEXT:    buffer_load_dwordx2 v[2:3], off, s[12:15], 0
+; SI-NEXT:    s_waitcnt vmcnt(0)
+; SI-NEXT:    v_div_scale_f64 v[4:5], s[0:1], v[2:3], v[2:3], v[0:1]
+; SI-NEXT:    v_rcp_f64_e32 v[6:7], v[4:5]
+; SI-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
+; SI-NEXT:    v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
+; SI-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
+; SI-NEXT:    v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
+; SI-NEXT:    v_div_scale_f64 v[8:9], s[0:1], v[0:1], v[2:3], v[0:1]
+; SI-NEXT:    v_mul_f64 v[10:11], v[8:9], v[6:7]
+; SI-NEXT:    v_fma_f64 v[12:13], -v[4:5], v[10:11], v[8:9]
+; SI-NEXT:    v_cmp_eq_u32_e32 vcc, v3, v5
+; SI-NEXT:    v_cmp_eq_u32_e64 s[0:1], v1, v9
+; SI-NEXT:    s_xor_b64 vcc, s[0:1], vcc
+; SI-NEXT:    s_nop 0
+; SI-NEXT:    s_nop 0
+; SI-NEXT:    v_div_fmas_f64 v[4:5], v[12:13], v[6:7], v[10:11]
+; SI-NEXT:    v_div_fixup_f64 v[4:5], v[4:5], v[2:3], v[0:1]
+; SI-NEXT:    v_bfe_u32 v6, v5, 20, 11
+; SI-NEXT:    v_add_i32_e32 v8, vcc, 0xfffffc01, v6
+; SI-NEXT:    s_mov_b32 s1, 0xfffff
+; SI-NEXT:    s_mov_b32 s0, s6
+; SI-NEXT:    v_lshr_b64 v[6:7], s[0:1], v8
+; SI-NEXT:    v_not_b32_e32 v6, v6
+; SI-NEXT:    v_and_b32_e32 v6, v4, v6
+; SI-NEXT:    v_not_b32_e32 v7, v7
+; SI-NEXT:    v_and_b32_e32 v7, v5, v7
+; SI-NEXT:    v_and_b32_e32 v9, 0x80000000, v5
+; SI-NEXT:    v_cmp_gt_i32_e32 vcc, 0, v8
+; SI-NEXT:    v_cndmask_b32_e32 v7, v7, v9, vcc
+; SI-NEXT:    v_cmp_lt_i32_e64 s[0:1], 51, v8
+; SI-NEXT:    v_cndmask_b32_e64 v5, v7, v5, s[0:1]
+; SI-NEXT:    v_cndmask_b32_e64 v6, v6, 0, vcc
+; SI-NEXT:    v_cndmask_b32_e64 v4, v6, v4, s[0:1]
+; SI-NEXT:    v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
+; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
+; SI-NEXT:    s_endpgm
+;
+; CI-LABEL: fast_frem_f64:
+; CI:       ; %bb.0:
+; CI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
+; CI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
+; CI-NEXT:    s_mov_b32 s11, 0xf000
+; CI-NEXT:    s_mov_b32 s10, -1
+; CI-NEXT:    s_mov_b32 s2, s10
+; CI-NEXT:    s_waitcnt lgkmcnt(0)
+; CI-NEXT:    s_mov_b32 s8, s4
+; CI-NEXT:    s_mov_b32 s9, s5
+; CI-NEXT:    s_mov_b32 s4, s6
+; CI-NEXT:    s_mov_b32 s5, s7
+; CI-NEXT:    s_mov_b32 s6, s10
+; CI-NEXT:    s_mov_b32 s7, s11
+; CI-NEXT:    s_mov_b32 s3, s11
+; CI-NEXT:    buffer_load_dwordx2 v[0:1], off, s[4:7], 0
+; CI-NEXT:    buffer_load_dwordx2 v[2:3], off, s[0:3], 0
+; CI-NEXT:    s_waitcnt vmcnt(0)
+; CI-NEXT:    v_div_scale_f64 v[4:5], s[0:1], v[2:3], v[2:3], v[0:1]
+; CI-NEXT:    v_rcp_f64_e32 v[6:7], v[4:5]
+; CI-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
+; CI-NEXT:    v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
+; CI-NEXT:    v_fma_f64 v[8:9], -v[4:5], v[6:7], 1.0
+; CI-NEXT:    v_fma_f64 v[6:7], v[6:7], v[8:9], v[6:7]
+; CI-NEXT:    v_div_scale_f64 v[8:9], vcc, v[0:1], v[2:3], v[0:1]
+; CI-NEXT:    v_mul_f64 v[10:11], v[8:9], v[6:7]
+; CI-NEXT:    v_fma_f64 v[4:5], -v[4:5], v[10:11], v[8:9]
+; CI-NEXT:    s_nop 1
+; CI-NEXT:    v_div_fmas_f64 v[4:5], v[4:5], v[6:7], v[10:11]
+; CI-NEXT:    v_div_fixup_f64 v[4:5], v[4:5], v[2:3], v[0:1]
+; CI-NEXT:    v_trunc_f64_e32 v[4:5], v[4:5]
+; CI-NEXT:    v_fma_f64 v[0:1], -v[4:5], v[2:3], v[0:1]
+; CI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[8:11], 0
+; CI-NEXT:    s_endpgm
+;
+; VI-LABEL: fast_frem_f64:
+; VI:       ; %bb.0:
+; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
+; VI-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0x34
+; VI-NEXT:    s_waitcnt lgkmcnt(0)
+; VI-NEXT:    v_mov_b32_e32 v2, s6
+; VI-NEXT:    v_mov_b32_e32 v3, s7
+; VI-NEXT:    v_mov_b32_e32 v4, s0
+; VI-NEXT:    v_mov_b32_e32 v5, s1
+; VI-NEXT:    flat_load_dwordx2 v[2:3], v[2:3]
+; VI-NEXT:    flat_load_dwordx2 v[4:5], v[4:5]
+; VI-NEXT:    v_mov_b32_e32 v0, s4
+; VI-NEXT:    v_mov_b32_e32 v1, s5
+; VI-NEXT:    s_waitcnt vmcnt(0) lgkmcnt(0)
+; VI-NEXT:    v_div_scale_f64 v[6:7], s[0:1], v[4:5], v[4:5], v[2:3]
+; VI-NEXT:    v_rcp_f64_e32 v[8:9], v[6:7]
+; VI-NEXT:    v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
+; VI-NEXT:    v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
+; VI-NEXT:    v_fma_f64 v[10:11], -v[6:7], v[8:9], 1.0
+; VI-NEXT:    v_fma_f64 v[8:9], v[8:9], v[10:11], v[8:9]
+; VI-NEXT:    v_div_scale_f64 v[10:11], vcc, v[2:3], v[4:5], v[2:3]
+; VI-NEXT:    v_mul_f64 v[12:13], v[10:11], v[8:9]
+; VI-NEXT:    v_fma_f64 v[6:7], -v[6:7], v[12:13], v[10:11]
+; VI-NEXT:    s_nop 1
+; VI-NEXT:    v_div_fmas_f64 v[6:7], v[6:7], v[8:9], v[12:13]
+; VI-NEXT:    v_div_fixup_f64 v[6:7], v[6:7], v[4:5], v[2:3]
+; VI-NEXT:    v_trunc_f64_e32 v[6:7], v[6:7]
+; VI-NEXT:    v_fma_f64 v[2:3], -v[6:7], v[4:5], v[2:3]
+; VI-NEXT:    flat_store_dwordx2 v[0:1], v[2:3]
+; VI-NEXT:    s_endpgm
+                      double addrspace(1)* %in2) #0 {
+   %r0 = load double, double addrspace(1)* %in1, align 8
+   %r1 = load double, double addrspace(1)* %in2, align 8
+   %r2 = frem fast double %r0, %r1
+   store double %r2, double addrspace(1)* %out, align 8
+   ret void
+}
+
 define amdgpu_kernel void @unsafe_frem_f64(double addrspace(1)* %out, double addrspace(1)* %in1,
 ; SI-LABEL: unsafe_frem_f64:
 ; SI:       ; %bb.0:

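A note on the recorded baselines: for f16 on VI the output uses a short v_rcp_f32_e32 / v_mul_f32_e32 sequence followed by v_div_fixup_f16, while SI and CI for f16, and all three targets for f32 and f64, emit the full precise v_div_scale / v_div_fmas expansion even though the frem is marked fast. A minimal sketch of an input whose division already gets the relaxed lowering, assuming the usual amdgcn handling of fdiv under fast-math flags (the kernel below is hypothetical and not part of frem.ll):

define amdgpu_kernel void @fast_fdiv_f32(float addrspace(1)* %out, float %a, float %b) {
  ; With the fast flag this f32 division may legally be lowered to a bare
  ; reciprocal-and-multiply; the fast_frem_* baselines above should move
  ; toward that kind of output once D84518 propagates the flags.
  %d = fdiv fast float %a, %b
  store float %d, float addrspace(1)* %out, align 4
  ret void
}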