[llvm] a20503c - AMDGPU: Add regression tests for fmin/fmax legacy matching

Matt Arsenault via llvm-commits llvm-commits at lists.llvm.org
Mon Dec 19 08:36:18 PST 2022


Author: Matt Arsenault
Date: 2022-12-19T11:36:13-05:00
New Revision: a20503caa101fc03cf34b8c1611f65c43c6e800e

URL: https://github.com/llvm/llvm-project/commit/a20503caa101fc03cf34b8c1611f65c43c6e800e
DIFF: https://github.com/llvm/llvm-project/commit/a20503caa101fc03cf34b8c1611f65c43c6e800e.diff

LOG: AMDGPU: Add regression tests for fmin/fmax legacy matching

Added: 
    

Modified: 
    llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll

Removed: 
    


################################################################################
diff  --git a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
index 195ea0b92395..68ca9d7cbd7a 100644
--- a/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
+++ b/llvm/test/CodeGen/AMDGPU/fmin_fmax_legacy.amdgcn.ll
@@ -1,8 +1,8 @@
-; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GCN %s
+; RUN: llc -march=amdgcn < %s | FileCheck -enable-var-scope -check-prefixes=SI-SAFE,GCN %s
+; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -march=amdgcn < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-NNAN %s
 
-; RUN: llc -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN %s
-; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -march=amdgcn -mcpu=fiji -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GCN %s
+; RUN: llc -march=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope -check-prefixes=VI-SAFE,GCN %s
+; RUN: llc -enable-no-nans-fp-math -enable-no-signed-zeros-fp-math -march=amdgcn -mcpu=fiji < %s | FileCheck -enable-var-scope --check-prefixes=GCN,GCN-NNAN %s
 
 ; GCN-LABEL: {{^}}min_fneg_select_regression_0:
 ; GCN-NOT: v_mul
@@ -72,5 +72,190 @@ define amdgpu_ps float @max_fneg_select_regression_posk_0(float %a) #0 {
   ret float %min.a
 }
 
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg1:
+; SI-SAFE: v_min_legacy_f32_e64 v0, 1.0, -v0
+
+; VI-SAFE: v_cmp_nge_f32_e32 vcc, -1.0, v0
+; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, -1.0, v0, vcc
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+
+; GCN-NNAN: v_min_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg1(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp ugt float %a, -1.0
+  %min.a = select i1 %cmp.a, float %fneg.a, float 1.0
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg1:
+; SI-SAFE: v_max_legacy_f32_e64 v0, 1.0, -v0
+
+; VI-SAFE: v_cmp_nle_f32_e32 vcc, -1.0, v0
+; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, -1.0, v0, vcc
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+
+; GCN-NNAN: v_max_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg1(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp ult float %a, -1.0
+  %min.a = select i1 %cmp.a, float %fneg.a, float 1.0
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg1:
+; SI-SAFE: v_min_legacy_f32_e64 v0, -v0, 1.0
+
+; VI-SAFE: v_cmp_lt_f32_e32 vcc, -1.0, v0
+; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, -1.0, v0, vcc
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+
+; GCN-NNAN: v_min_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg1(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp ogt float %a, -1.0
+  %min.a = select i1 %cmp.a, float %fneg.a, float 1.0
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg1:
+; SI-SAFE: v_max_legacy_f32_e64 v0, -v0, 1.0
+
+; VI-SAFE: v_cmp_gt_f32_e32 vcc, -1.0, v0
+; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, -1.0, v0, vcc
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+
+; GCN-NNAN: v_max_f32_e64 v0, -v0, 1.0
+define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg1(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp olt float %a, -1.0
+  %min.a = select i1 %cmp.a, float %fneg.a, float 1.0
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ugt_a_neg8:
+; SI-SAFE: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; SI-SAFE-NEXT: v_min_legacy_f32_e64 v0, [[K]], -v0
+
+; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0xc1000000
+; VI-SAFE: v_cmp_nge_f32_e32 vcc, [[K0]], v0
+; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, [[K1]], v0, vcc
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+
+; GCN-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; GCN-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ugt_a_neg8(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp ugt float %a, -8.0
+  %min.a = select i1 %cmp.a, float %fneg.a, float 8.0
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ult_a_neg8:
+; SI-SAFE: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; SI-SAFE-NEXT: v_max_legacy_f32_e64 v0, [[K]], -v0
+
+; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0xc1000000
+; VI-SAFE: v_cmp_nle_f32_e32 vcc, [[K0]], v0
+; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, [[K1]], v0, vcc
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+
+; GCN-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; GCN-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ult_a_neg8(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp ult float %a, -8.0
+  %min.a = select i1 %cmp.a, float %fneg.a, float 8.0
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_ogt_a_neg8:
+; SI-SAFE: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; SI-SAFE-NEXT: v_min_legacy_f32_e64 v0, -v0, [[K]]
+
+; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0xc1000000
+; VI-SAFE: v_cmp_lt_f32_e32 vcc, [[K0]], v0
+; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, [[K1]], v0, vcc
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+
+; GCN-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; GCN-NNAN-NEXT: v_min_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_ogt_a_neg8(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp ogt float %a, -8.0
+  %min.a = select i1 %cmp.a, float %fneg.a, float 8.0
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}select_fneg_a_or_q_cmp_olt_a_neg8:
+; SI-SAFE: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; SI-SAFE-NEXT: v_max_legacy_f32_e64 v0, -v0, [[K]]
+
+
+; VI-SAFE-DAG: s_mov_b32 [[K0:s[0-9]+]], 0xc1000000
+; VI-SAFE-DAG: v_mov_b32_e32 [[K1:v[0-9]+]], 0xc1000000
+; VI-SAFE: v_cmp_gt_f32_e32 vcc, [[K0]], v0
+; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, [[K1]], v0, vcc
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+
+; GCN-NNAN: s_mov_b32 [[K:s[0-9]+]], 0x41000000
+; GCN-NNAN-NEXT: v_max_f32_e64 v0, -v0, [[K]]
+define amdgpu_ps float @select_fneg_a_or_q_cmp_olt_a_neg8(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp olt float %a, -8.0
+  %min.a = select i1 %cmp.a, float %fneg.a, float 8.0
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}select_fneg_a_or_neg1_cmp_olt_a_1:
+; SI-SAFE: v_max_legacy_f32_e64 v0, -v0, -1.0
+
+; VI-SAFE: v_cmp_gt_f32_e32 vcc, 1.0, v0
+; VI-SAFE-NEXT: v_cndmask_b32_e32 v0, 1.0, v0, vcc
+; VI-SAFE-NEXT: v_xor_b32_e32 v0, 0x80000000, v0
+
+; GCN-NNAN: v_max_f32_e64 v0, -v0, -1.0
+define amdgpu_ps float @select_fneg_a_or_neg1_cmp_olt_a_1(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp olt float %a, 1.0
+  %min.a = select i1 %cmp.a, float %fneg.a, float -1.0
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}ult_a_select_fneg_a_b:
+; SI-SAFE: v_cmp_nge_f32_e32 vcc, v0, v1
+; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+
+; VI-SAFE: v_cmp_nge_f32_e32 vcc, v0, v1
+; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+
+; GCN-NNAN: v_cmp_lt_f32_e32 vcc, v0, v1
+; GCN-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+
+define amdgpu_ps float @ult_a_select_fneg_a_b(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp ult float %a, %b
+  %min.a = select i1 %cmp.a, float %fneg.a, float %b
+  ret float %min.a
+}
+
+; GCN-LABEL: {{^}}ugt_a_select_fneg_a_b:
+; SI-SAFE: v_cmp_nle_f32_e32 vcc, v0, v1
+; SI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+
+; VI-SAFE: v_cmp_nle_f32_e32 vcc, v0, v1
+; VI-SAFE-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+
+; GCN-NNAN: v_cmp_gt_f32_e32 vcc, v0, v1
+; GCN-NNAN-NEXT: v_cndmask_b32_e64 v0, v1, -v0, vcc
+define amdgpu_ps float @ugt_a_select_fneg_a_b(float %a, float %b) #0 {
+  %fneg.a = fneg float %a
+  %cmp.a = fcmp ugt float %a, %b
+  %min.a = select i1 %cmp.a, float %fneg.a, float %b
+  ret float %min.a
+}
+
 attributes #0 = { nounwind }
 attributes #1 = { nounwind readnone }


        


More information about the llvm-commits mailing list