[llvm] 39af465 - [AArch64][SVE] Replace destructive operand of vector zeros with a bundled MOVPRFX instruction
via llvm-commits
llvm-commits at lists.llvm.org
Mon Feb 6 19:04:18 PST 2023
Author: zhongyunde
Date: 2023-02-07T11:03:50+08:00
New Revision: 39af4659f24026fd3c667ce50a9e798485e1ae98
URL: https://github.com/llvm/llvm-project/commit/39af4659f24026fd3c667ce50a9e798485e1ae98
DIFF: https://github.com/llvm/llvm-project/commit/39af4659f24026fd3c667ce50a9e798485e1ae98.diff
LOG: [AArch64][SVE] Replace destructive operand of vector zeros with a bundled MOVPRFX instruction
Replace unary instructions where the destructive operand is a vector of zeros
with a bundled MOVPRFX instruction, e.g:
Transform:
  %X0 = DUP_ZI_S 0, 0
  %X0 = FLOGB_ZPmZ_S X0, P0, X2
into:
  X0 = MOVPRFX P0/z, X2 // avoids the false register dependency that X0 = MOVPRFX P0/z, X0 would introduce
  X0 = FLOGB_ZPmZ_S X0, P0, X2
NOTE: This patch adds an @earlyclobber constraint to PredOneOpPassthruPseudo to ensure
safe register allocation when the movprfx form is used.
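For reference, a minimal IR-level sketch of the lowering this enables, mirroring the flogb_f16 case
from the new test added below (the %unused argument only keeps z0 free so the register assignment
stays simple; assumes -mattr=+sve2,+use-experimental-zeroing-pseudos):

  define <vscale x 8 x i16> @flogb_f16(<vscale x 8 x i16> %unused, <vscale x 8 x i1> %pg, <vscale x 8 x half> %a) {
    ; With a zeroinitializer passthru the FLOGB_ZPZZ_ZERO_H pseudo is selected and later
    ; expanded to a movprfx/flogb pair instead of materialising the zeros with a separate mov:
    ;   movprfx z0.h, p0/z, z1.h
    ;   flogb   z0.h, p0/m, z1.h
    %out = call <vscale x 8 x i16> @llvm.aarch64.sve.flogb.nxv8f16(
                  <vscale x 8 x i16> zeroinitializer,
                  <vscale x 8 x i1> %pg,
                  <vscale x 8 x half> %a)
    ret <vscale x 8 x i16> %out
  }

  declare <vscale x 8 x i16> @llvm.aarch64.sve.flogb.nxv8f16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x half>)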
Depends on D105889
Reviewed By: paulwalker-arm
Differential Revision: https://reviews.llvm.org/D138888
Added:
llvm/test/CodeGen/AArch64/sve2-intrinsics-fp-int-binary-logarithm-zeroing.ll
Modified:
llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
llvm/lib/Target/AArch64/SVEInstrFormats.td
Removed:
################################################################################
diff --git a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
index 0da93d0bb4d1..11f1d4a8a4b7 100644
--- a/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
@@ -1045,8 +1045,8 @@ bool AArch64ExpandPseudo::expandMI(MachineBasicBlock &MBB,
int OrigInstr = AArch64::getSVEPseudoMap(MI.getOpcode());
if (OrigInstr != -1) {
auto &Orig = TII->get(OrigInstr);
- if ((Orig.TSFlags & AArch64::DestructiveInstTypeMask)
- != AArch64::NotDestructive) {
+ if ((Orig.TSFlags & AArch64::DestructiveInstTypeMask) !=
+ AArch64::NotDestructive) {
return expand_DestructiveOp(MI, MBB, MBBI);
}
}
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 079365d6b1b6..3c0214e83cdf 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3539,7 +3539,7 @@ let Predicates = [HasSVE2] in {
let Predicates = [HasSVE2orSME] in {
// SVE2 floating-point base 2 logarithm as integer
- defm FLOGB_ZPmZ : sve2_fp_flogb<"flogb", int_aarch64_sve_flogb>;
+ defm FLOGB_ZPmZ : sve2_fp_flogb<"flogb", "FLOGB_ZPZZ", int_aarch64_sve_flogb>;
// SVE2 floating-point convert precision
defm FCVTXNT_ZPmZ : sve2_fp_convert_down_odd_rounding_top<"fcvtxnt", "int_aarch64_sve_fcvtxnt">;
@@ -3581,6 +3581,10 @@ let Predicates = [HasSVE2orSME] in {
def EXT_ZZI_B : sve2_int_perm_extract_i_cons<"ext">;
} // End HasSVE2orSME
+let Predicates = [HasSVE2orSME, UseExperimentalZeroingPseudos] in {
+ defm FLOGB_ZPZZ : sve2_fp_un_pred_zeroing_hsd<int_aarch64_sve_flogb>;
+} // End HasSVE2orSME, UseExperimentalZeroingPseudos
+
let Predicates = [HasSVE2] in {
// SVE2 non-temporal gather loads
defm LDNT1SB_ZZR_S : sve2_mem_gldnt_vs_32_ptrs<0b00000, "ldnt1sb", AArch64ldnt1s_gather_z, nxv4i8>;
diff --git a/llvm/lib/Target/AArch64/SVEInstrFormats.td b/llvm/lib/Target/AArch64/SVEInstrFormats.td
index f45ff71d1823..51f5e5b90410 100644
--- a/llvm/lib/Target/AArch64/SVEInstrFormats.td
+++ b/llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -520,6 +520,11 @@ class SVE_3_Op_Pat_Shift_Imm_SelZero<ValueType vtd, SDPatternOperator op,
Operand vt3, Instruction inst>
: Pat<(vtd (op vt1:$Op1, (vselect vt1:$Op1, vt2:$Op2, (SVEDup0)), (i32 (vt3:$Op3)))),
(inst $Op1, $Op2, vt3:$Op3)>;
+
+class SVE_2_Op_Pat_Zero<ValueType vtd, SDPatternOperator op, ValueType vt1,
+ ValueType vt2, Instruction inst>
+ : Pat<(vtd (op (vtd (SVEDup0)), vt1:$Op1, vt2:$Op2)),
+ (inst (IMPLICIT_DEF), $Op1, $Op2)>;
}
//
@@ -677,9 +682,13 @@ let hasNoSchedulingInfo = 1 in {
// Pseudos for passthru operands
//
let hasNoSchedulingInfo = 1 in {
- class PredOneOpPassthruPseudo<string name, ZPRRegOp zprty>
+ class PredOneOpPassthruPseudo<string name, ZPRRegOp zprty,
+ FalseLanesEnum flags = FalseLanesNone>
: SVEPseudo2Instr<name, 0>,
- Pseudo<(outs zprty:$Zd), (ins zprty:$Passthru, PPR3bAny:$Pg, zprty:$Zs), []>;
+ Pseudo<(outs zprty:$Zd), (ins zprty:$Passthru, PPR3bAny:$Pg, zprty:$Zs), []> {
+ let FalseLanes = flags;
+ let Constraints = !if(!eq(flags, FalseLanesZero), "$Zd = $Passthru,@earlyclobber $Zd", "");
+ }
}
//===----------------------------------------------------------------------===//
@@ -2903,16 +2912,29 @@ multiclass sve_fp_2op_p_zd_HSD<bits<5> opc, string asm, SDPatternOperator op> {
defm : SVE_1_Op_PassthruUndef_Pat<nxv2f64, op, nxv2i1, nxv2f64, !cast<Instruction>(NAME # _UNDEF_D)>;
}
-multiclass sve2_fp_flogb<string asm, SDPatternOperator op> {
- def _H : sve_fp_2op_p_zd<0b0011010, asm, ZPR16, ZPR16, ElementSizeH>;
- def _S : sve_fp_2op_p_zd<0b0011100, asm, ZPR32, ZPR32, ElementSizeS>;
- def _D : sve_fp_2op_p_zd<0b0011110, asm, ZPR64, ZPR64, ElementSizeD>;
+multiclass sve2_fp_flogb<string asm, string Ps, SDPatternOperator op> {
+ def _H : sve_fp_2op_p_zd<0b0011010, asm, ZPR16, ZPR16, ElementSizeH>,
+ SVEPseudo2Instr<Ps # _H, 1>;
+ def _S : sve_fp_2op_p_zd<0b0011100, asm, ZPR32, ZPR32, ElementSizeS>,
+ SVEPseudo2Instr<Ps # _S, 1>;
+ def _D : sve_fp_2op_p_zd<0b0011110, asm, ZPR64, ZPR64, ElementSizeD>,
+ SVEPseudo2Instr<Ps # _D, 1>;
def : SVE_3_Op_Pat<nxv8i16, op, nxv8i16, nxv8i1, nxv8f16, !cast<Instruction>(NAME # _H)>;
def : SVE_3_Op_Pat<nxv4i32, op, nxv4i32, nxv4i1, nxv4f32, !cast<Instruction>(NAME # _S)>;
def : SVE_3_Op_Pat<nxv2i64, op, nxv2i64, nxv2i1, nxv2f64, !cast<Instruction>(NAME # _D)>;
}
+multiclass sve2_fp_un_pred_zeroing_hsd<SDPatternOperator op> {
+ def _ZERO_H : PredOneOpPassthruPseudo<NAME # _H, ZPR16, FalseLanesZero>;
+ def _ZERO_S : PredOneOpPassthruPseudo<NAME # _S, ZPR32, FalseLanesZero>;
+ def _ZERO_D : PredOneOpPassthruPseudo<NAME # _D, ZPR64, FalseLanesZero>;
+
+ def : SVE_2_Op_Pat_Zero<nxv8i16, op, nxv8i1, nxv8f16, !cast<Pseudo>(NAME # _ZERO_H)>;
+ def : SVE_2_Op_Pat_Zero<nxv4i32, op, nxv4i1, nxv4f32, !cast<Pseudo>(NAME # _ZERO_S)>;
+ def : SVE_2_Op_Pat_Zero<nxv2i64, op, nxv2i1, nxv2f64, !cast<Pseudo>(NAME # _ZERO_D)>;
+}
+
multiclass sve2_fp_convert_down_odd_rounding<string asm, string op> {
def _DtoS : sve_fp_2op_p_zd<0b0001010, asm, ZPR64, ZPR32, ElementSizeD>;
def : SVE_3_Op_Pat<nxv4f32, !cast<SDPatternOperator>(op # _f32f64), nxv4f32, nxv2i1, nxv2f64, !cast<Instruction>(NAME # _DtoS)>;
diff --git a/llvm/test/CodeGen/AArch64/sve2-intrinsics-fp-int-binary-logarithm-zeroing.ll b/llvm/test/CodeGen/AArch64/sve2-intrinsics-fp-int-binary-logarithm-zeroing.ll
new file mode 100644
index 000000000000..f61df8271273
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/sve2-intrinsics-fp-int-binary-logarithm-zeroing.ll
@@ -0,0 +1,47 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 -mattr=+use-experimental-zeroing-pseudos < %s | FileCheck %s
+
+;
+; FLOGB
+;
+
+; NOTE: The %unused parameter ensures z0 is free, leading to a simpler test.
+define <vscale x 8 x i16> @flogb_f16(<vscale x 8 x i16> %unused, <vscale x 8 x i1> %pg, <vscale x 8 x half> %a) {
+; CHECK-LABEL: flogb_f16:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.h, p0/z, z1.h
+; CHECK-NEXT: flogb z0.h, p0/m, z1.h
+; CHECK-NEXT: ret
+ %out = call <vscale x 8 x i16> @llvm.aarch64.sve.flogb.nxv8f16(<vscale x 8 x i16> zeroinitializer,
+ <vscale x 8 x i1> %pg,
+ <vscale x 8 x half> %a)
+ ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @flogb_f32(<vscale x 4 x i32> %unused, <vscale x 4 x i1> %pg, <vscale x 4 x float> %a) {
+; CHECK-LABEL: flogb_f32:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.s, p0/z, z1.s
+; CHECK-NEXT: flogb z0.s, p0/m, z1.s
+; CHECK-NEXT: ret
+ %out = call <vscale x 4 x i32> @llvm.aarch64.sve.flogb.nxv4f32(<vscale x 4 x i32> zeroinitializer,
+ <vscale x 4 x i1> %pg,
+ <vscale x 4 x float> %a)
+ ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @flogb_f64(<vscale x 2 x i64> %unused, <vscale x 2 x i1> %pg, <vscale x 2 x double> %a) {
+; CHECK-LABEL: flogb_f64:
+; CHECK: // %bb.0:
+; CHECK-NEXT: movprfx z0.d, p0/z, z1.d
+; CHECK-NEXT: flogb z0.d, p0/m, z1.d
+; CHECK-NEXT: ret
+ %out = call <vscale x 2 x i64> @llvm.aarch64.sve.flogb.nxv2f64(<vscale x 2 x i64> zeroinitializer,
+ <vscale x 2 x i1> %pg,
+ <vscale x 2 x double> %a)
+ ret <vscale x 2 x i64> %out
+}
+
+declare <vscale x 8 x i16> @llvm.aarch64.sve.flogb.nxv8f16(<vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x half>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.flogb.nxv4f32(<vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x float>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.flogb.nxv2f64(<vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x double>)