[llvm] c281a6a - [RISCV] Add isel pattern for int_riscv_vfmv_s_f with scalar FP constant operand.
Craig Topper via llvm-commits
llvm-commits at lists.llvm.org
Wed Nov 15 10:51:58 PST 2023
Author: Craig Topper
Date: 2023-11-15T10:51:43-08:00
New Revision: c281a6add55a861a16eabe3bb8b436f63ef8e0cb
URL: https://github.com/llvm/llvm-project/commit/c281a6add55a861a16eabe3bb8b436f63ef8e0cb
DIFF: https://github.com/llvm/llvm-project/commit/c281a6add55a861a16eabe3bb8b436f63ef8e0cb.diff
LOG: [RISCV] Add isel pattern for int_riscv_vfmv_s_f with scalar FP constant operand.
Use vmv_s_x instead if the constant will be materialized in a GPR.
This avoids going from GPR to FPR to vector.
We already did this for RISCVISD::VFMV_S_F_VL and probably we should
just turn int_riscv_vfmv_s_f into RISCVISD::VFMV_S_F_VL, but I'd like
to see some improvements to RISCVInsertVSETVLI first.
Added:
Modified:
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 01a425298c9da28..be50bb95c81164e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -241,6 +241,8 @@ def VLOpFrag : PatFrag<(ops), (XLenVT (VLOp (XLenVT AVL:$vl)))>;
// This must be kept in sync with RISCV::VLMaxSentinel.
def VLMax : OutPatFrag<(ops), (XLenVT -1)>;
+def SelectFPImm : ComplexPattern<fAny, 1, "selectFPImm", [], [], 1>;
+
// List of EEW.
defvar EEWList = [8, 16, 32, 64];
@@ -7425,6 +7427,11 @@ foreach fvti = AllFloatVectors in {
(fvti.Scalar (fpimm0)), VLOpFrag)),
(!cast<Instruction>("PseudoVMV_S_X_" # fvti.LMul.MX)
(fvti.Vector $rs1), (XLenVT X0), GPR:$vl, fvti.Log2SEW)>;
+
+ def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
+ (fvti.Scalar (SelectFPImm (XLenVT GPR:$imm))), VLOpFrag)),
+ (!cast<Instruction>("PseudoVMV_S_X_" # fvti.LMul.MX)
+ (fvti.Vector $rs1), GPR:$imm, GPR:$vl, fvti.Log2SEW)>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index a27719455642a71..dc6b57fad321055 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -585,8 +585,6 @@ def SplatPat_simm5_plus1_nonzero
def Low8BitsSplatPat
: ComplexPattern<vAny, 1, "selectLow8BitsVSplat", [], [], 2>;
-def SelectFPImm : ComplexPattern<fAny, 1, "selectFPImm", [], [], 1>;
-
// Ignore the vl operand on vmv_v_f, and vmv_s_f.
def SplatFPOp : PatFrags<(ops node:$op),
[(riscv_vfmv_v_f_vl undef, node:$op, srcvalue),
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
index 1cc5f7906ede84c..4fdc7f2c774f5b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
@@ -368,9 +368,8 @@ define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16_negzero(<vscale x 1 x h
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16_negzero:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui a1, 1048568
-; CHECK-NEXT: fmv.h.x fa5, a1
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
-; CHECK-NEXT: vfmv.s.f v8, fa5
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half> %0, half -0.0, iXLen %1)
@@ -381,9 +380,8 @@ define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32_negzero(<vscale x 1 x
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32_negzero:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: lui a1, 524288
-; CHECK-NEXT: fmv.w.x fa5, a1
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
-; CHECK-NEXT: vfmv.s.f v8, fa5
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float> %0, float -0.0, iXLen %1)
@@ -401,10 +399,10 @@ define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64_negzero(<vscale x 1 x
;
; RV64-LABEL: intrinsic_vfmv.s.f_f_nxv1f64_negzero:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: fmv.d.x fa5, zero
-; RV64-NEXT: fneg.d fa5, fa5
+; RV64-NEXT: li a1, -1
+; RV64-NEXT: slli a1, a1, 63
; RV64-NEXT: vsetvli zero, a0, e64, m1, tu, ma
-; RV64-NEXT: vfmv.s.f v8, fa5
+; RV64-NEXT: vmv.s.x v8, a1
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64(<vscale x 1 x double> %0, double -0.0, iXLen %1)
More information about the llvm-commits
mailing list