[llvm] f3dcc0c - [LLVM][AArch64][tblgen]: Match clamp pattern (#75529)

via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 20 06:37:03 PST 2023


Author: Hassnaa Hamdi
Date: 2023-12-20T14:36:58Z
New Revision: f3dcc0cba982f1e7d6089c5faa2a7c7c7a743e65

URL: https://github.com/llvm/llvm-project/commit/f3dcc0cba982f1e7d6089c5faa2a7c7c7a743e65
DIFF: https://github.com/llvm/llvm-project/commit/f3dcc0cba982f1e7d6089c5faa2a7c7c7a743e65.diff

LOG: [LLVM][AArch64][tblgen]: Match clamp pattern (#75529)

Add an isel pattern to replace min(max(v1,v2),v3) with clamp.
Add tests for uclamp, sclamp, bfclamp and fclamp.
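
The new PatFrags let the clamp instruction patterns match either the
existing SVE clamp intrinsics or the equivalent predicated min/max node
pairs. For illustration, IR of the following shape (mirroring the tests
added below) now selects to a single clamp instruction instead of a
separate max/min pair:

    ; Clamps each element of %a to the range [%b, %c].
    define <vscale x 4 x i32> @sclampi32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
      %max = tail call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
      %res = tail call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> %max, <vscale x 4 x i32> %c)
      ret <vscale x 4 x i32> %res
    }
    ; generates: sclamp z0.s, z1.s, z2.s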

Added: 
    llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll

Modified: 
    llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index f68059889d0c51..15b47f6f6e5259 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -316,6 +316,26 @@ def AArch64ssra : PatFrags<(ops node:$op1, node:$op2, node:$op3),
                            [(int_aarch64_sve_ssra node:$op1, node:$op2, node:$op3),
                             (add node:$op1, (AArch64asr_p (SVEAnyPredicate), node:$op2, (SVEShiftSplatImmR (i32 node:$op3))))]>;
 
+// Replace the pattern min(max(v1,v2),v3) with clamp.
+def AArch64sclamp : PatFrags<(ops node:$Zd, node:$Zn, node:$Zm),
+                             [(int_aarch64_sve_sclamp node:$Zd, node:$Zn, node:$Zm),
+                              (AArch64smin_p (SVEAllActive),
+                                  (AArch64smax_p (SVEAllActive), node:$Zd, node:$Zn),
+                                  node:$Zm)
+                             ]>;
+def AArch64uclamp : PatFrags<(ops node:$Zd, node:$Zn, node:$Zm),
+                             [(int_aarch64_sve_uclamp node:$Zd, node:$Zn, node:$Zm),
+                              (AArch64umin_p (SVEAllActive),
+                                  (AArch64umax_p (SVEAllActive), node:$Zd, node:$Zn),
+                                  node:$Zm)
+                             ]>;
+def AArch64fclamp : PatFrags<(ops node:$Zd, node:$Zn, node:$Zm),
+                             [(int_aarch64_sve_fclamp node:$Zd, node:$Zn, node:$Zm),
+                              (AArch64fminnm_p (SVEAllActive),
+                                  (AArch64fmaxnm_p (SVEAllActive), node:$Zd, node:$Zn),
+                                  node:$Zm)
+                             ]>;
+
 def SDT_AArch64FCVT : SDTypeProfile<1, 3, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3>,
   SDTCVecEltisVT<1,i1>
@@ -3802,8 +3822,8 @@ let Predicates = [HasSVE2BitPerm] in {
 let Predicates = [HasSVE2p1_or_HasSME] in {
 defm REVD_ZPmZ : sve2_int_perm_revd<"revd", AArch64revd_mt>;
 
-defm SCLAMP_ZZZ : sve2_clamp<"sclamp", 0b0, int_aarch64_sve_sclamp>;
-defm UCLAMP_ZZZ : sve2_clamp<"uclamp", 0b1, int_aarch64_sve_uclamp>;
+defm SCLAMP_ZZZ : sve2_clamp<"sclamp", 0b0, AArch64sclamp>;
+defm UCLAMP_ZZZ : sve2_clamp<"uclamp", 0b1, AArch64uclamp>;
 
 defm PSEL_PPPRI : sve2_int_perm_sel_p<"psel", int_aarch64_sve_psel>;
 } // End HasSVE2p1_or_HasSME
@@ -3813,7 +3833,7 @@ defm PSEL_PPPRI : sve2_int_perm_sel_p<"psel", int_aarch64_sve_psel>;
 //===----------------------------------------------------------------------===//
 
 let Predicates = [HasSVE2p1_or_HasSME2] in {
-defm FCLAMP_ZZZ : sve2p1_fclamp<"fclamp", int_aarch64_sve_fclamp>;
+defm FCLAMP_ZZZ : sve2p1_fclamp<"fclamp", AArch64fclamp>;
 
 defm FDOT_ZZZ_S  : sve_float_dot<0b0, 0b0, ZPR32, ZPR16, "fdot", nxv8f16, int_aarch64_sve_fdot_x2>;
 defm FDOT_ZZZI_S : sve_float_dot_indexed<0b0, 0b00, ZPR16, ZPR3b16, "fdot", nxv8f16, int_aarch64_sve_fdot_lane_x2>;
@@ -4055,7 +4075,7 @@ defm BFMINNM_ZPZZ : sve2p1_bf_bin_pred_zds<AArch64fminnm_p>;
 
 defm BFMUL_ZZZI : sve2p1_fp_bfmul_by_indexed_elem<"bfmul", int_aarch64_sve_fmul_lane>;
 
-defm BFCLAMP_ZZZ : sve2p1_bfclamp<"bfclamp", int_aarch64_sve_fclamp>;
+defm BFCLAMP_ZZZ : sve2p1_bfclamp<"bfclamp", AArch64fclamp>;
 } // End HasSVE2orSME2, HasB16B16
 
 

diff --git a/llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll b/llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll
new file mode 100644
index 00000000000000..28ec430aff3d3d
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll
@@ -0,0 +1,151 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 < %s | FileCheck %s
+
+; Replace the pattern min(max(v1,v2),v3) with clamp.
+
+define <vscale x 16 x i8> @uclampi8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: uclampi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uclamp z0.b, z1.b, z2.b
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  %res = tail call <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8> %max, <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @uclampi16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: uclampi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uclamp z0.h, z1.h, z2.h
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  %res = tail call <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16> %max, <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @uclampi32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: uclampi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uclamp z0.s, z1.s, z2.s
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  %res = tail call <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32> %max, <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @uclampi64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: uclampi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    uclamp z0.d, z1.d, z2.d
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  %res = tail call <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64> %max, <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 16 x i8> @sclampi8(<vscale x 16 x i8> %c, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
+; CHECK-LABEL: sclampi8:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sclamp z0.b, z1.b, z2.b
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
+  %res = tail call <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8> %max, <vscale x 16 x i8> %c)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 8 x i16> @sclampi16(<vscale x 8 x i16> %c, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
+; CHECK-LABEL: sclampi16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sclamp z0.h, z1.h, z2.h
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  %res = tail call <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16> %max, <vscale x 8 x i16> %c)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @sclampi32(<vscale x 4 x i32> %c, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: sclampi32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sclamp z0.s, z1.s, z2.s
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
+  %res = tail call <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32> %max, <vscale x 4 x i32> %c)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @sclampi64(<vscale x 2 x i64> %c, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: sclampi64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    sclamp z0.d, z1.d, z2.d
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
+  %res = tail call <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64> %max, <vscale x 2 x i64> %c)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 8 x bfloat> @fclampbf16(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b, <vscale x 8 x bfloat> %c) {
+; CHECK-LABEL: fclampbf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    bfclamp z0.h, z1.h, z2.h
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
+  %res = tail call <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1> splat (i1 true), <vscale x 8 x bfloat> %max, <vscale x 8 x bfloat> %c)
+  ret <vscale x 8 x bfloat> %res
+}
+
+define <vscale x 8 x half> @fclampf16(<vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x half> %c) {
+; CHECK-LABEL: fclampf16:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fclamp z0.h, z1.h, z2.h
+; CHECK-NEXT:    ret
+  %max = call <vscale x 8 x half> @llvm.maxnum.nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
+  %res = call <vscale x 8 x half> @llvm.minnum.nxv8f16(<vscale x 8 x half> %max, <vscale x 8 x half> %c)
+  ret <vscale x 8 x half> %res
+}
+
+define <vscale x 4 x float> @fclampf32(<vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x float> %c) {
+; CHECK-LABEL: fclampf32:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fclamp z0.s, z1.s, z2.s
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 4 x float> @llvm.maxnum.nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
+  %res = tail call <vscale x 4 x float> @llvm.minnum.nxv4f32(<vscale x 4 x float> %max, <vscale x 4 x float> %c)
+  ret <vscale x 4 x float> %res
+}
+
+define <vscale x 2 x double> @fclampf64(<vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) {
+; CHECK-LABEL: fclampf64:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    fclamp z0.d, z1.d, z2.d
+; CHECK-NEXT:    ret
+  %max = tail call <vscale x 2 x double> @llvm.maxnum.nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
+  %res = tail call <vscale x 2 x double> @llvm.minnum.nxv2f64(<vscale x 2 x double> %max, <vscale x 2 x double> %c)
+  ret <vscale x 2 x double> %res
+}
+
+declare <vscale x 16 x i8> @llvm.umax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 16 x i8> @llvm.umin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.umax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 8 x i16> @llvm.umin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.umax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.umin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.umax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.umin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.smax.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 16 x i8> @llvm.smin.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.smax.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 8 x i16> @llvm.smin.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.smax.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 4 x i32> @llvm.smin.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.smax.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+declare <vscale x 2 x i64> @llvm.smin.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmaxnm.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fminnm.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
+declare <vscale x 8 x half>   @llvm.maxnum.nxv8f16 (<vscale x 8 x half>,   <vscale x 8 x half>)
+declare <vscale x 8 x half>   @llvm.minnum.nxv8f16 (<vscale x 8 x half>,   <vscale x 8 x half>)
+declare <vscale x 4 x float>  @llvm.maxnum.nxv4f32 (<vscale x 4 x float>,  <vscale x 4 x float>)
+declare <vscale x 4 x float>  @llvm.minnum.nxv4f32 (<vscale x 4 x float>,  <vscale x 4 x float>)
+declare <vscale x 2 x double> @llvm.maxnum.nxv2f64 (<vscale x 2 x double>, <vscale x 2 x double>)
+declare <vscale x 2 x double> @llvm.minnum.nxv2f64 (<vscale x 2 x double>, <vscale x 2 x double>)
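
For reference, the RUN line above expands to the following command once
%s is substituted with the test path; piping the llc output through
FileCheck verifies the CHECK lines:

    llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 \
        < llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll \
      | FileCheck llvm/test/CodeGen/AArch64/sve2-min-max-clamp.ll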
