[llvm] [RISCV] Remove vfmv.s.f and vfmv.f.s lmul pseudo variants (PR #100970)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 29 04:10:28 PDT 2024
https://github.com/lukel97 updated https://github.com/llvm/llvm-project/pull/100970
From 87cd6aeffef086069c24cecd3d09c47a8ecad80e Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 29 Jul 2024 12:36:12 +0800
Subject: [PATCH 1/2] [RISCV] Remove vfmv.s.f and vfmv.f.s lmul pseudo variants
In #71501 we removed the LMUL pseudo variants for vmv.s.x and vmv.x.s because those instructions ignore register groups, so this patch does the same for their floating-point equivalents, vfmv.s.f and vfmv.f.s.
We don't need to add any extra extractelt patterns to RISCVInstrInfoVSDPatterns.td because lowerEXTRACT_VECTOR_ELT already makes sure the node is narrowed down to LMUL 1.
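As an illustration (not part of the patch; the function name is made up, but the IR and output mirror the extractelt-fp.ll tests below): vfmv.f.s always reads element 0 of its source and ignores the register group, so a single VR-class pseudo covers every fractional-LMUL and m1 type. Given

  define float @extract_first(<vscale x 1 x float> %v) {
    %e = extractelement <vscale x 1 x float> %v, i32 0
    ret float %e
  }

the only codegen change is the vtype written by vsetivli, m1 instead of mf2; the vfmv.f.s itself is unchanged:

  vsetivli zero, 1, e32, m1, ta, ma
  vfmv.f.s fa0, v8
  ret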
---
llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp | 2 +-
.../Target/RISCV/RISCVInstrInfoVPseudos.td | 35 ++++++++-----------
.../Target/RISCV/RISCVInstrInfoVSDPatterns.td | 8 ++---
.../Target/RISCV/RISCVInstrInfoVVLPatterns.td | 10 +++---
llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll | 6 ++--
.../RISCV/rvv/fixed-vectors-bitcast.ll | 14 ++++----
.../RISCV/rvv/fixed-vectors-fp-bitcast.ll | 18 +++++-----
.../CodeGen/RISCV/rvv/fixed-vectors-llrint.ll | 8 ++---
.../CodeGen/RISCV/rvv/fixed-vectors-lrint.ll | 8 ++---
.../rvv/fixed-vectors-reduction-fp-vp.ll | 12 +++----
...fixed-vectors-vfptoi-constrained-sdnode.ll | 8 ++---
.../CodeGen/RISCV/rvv/fpclamptosat_vec.ll | 12 +++----
llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll | 12 +++----
llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll | 2 +-
llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll | 6 ++--
llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll | 6 ++--
.../RISCV/rvv/vreductions-fp-sdnode.ll | 8 ++---
.../CodeGen/RISCV/rvv/vreductions-fp-vp.ll | 12 +++----
18 files changed, 90 insertions(+), 97 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 96250b9c03b79..7dbda771c8b29 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -458,7 +458,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
}
}
- // vmv.x.s, and vmv.f.s are unconditional and ignore everything except SEW.
+ // vmv.x.s, and vfmv.f.s are unconditional and ignore everything except SEW.
if (isScalarExtractInstr(MI)) {
assert(!RISCVII::hasVLOp(TSFlags));
Res.LMUL = DemandedFields::LMULNone;
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index b860273d639ee..6da4fa8106243 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6781,26 +6781,21 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
let Predicates = [HasVInstructionsAnyF] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
foreach f = FPList in {
- foreach m = f.MxList in {
- defvar mx = m.MX;
- let VLMul = m.value in {
- let HasSEWOp = 1, BaseInstr = VFMV_F_S in
- def "PseudoVFMV_" # f.FX # "_S_" # mx :
- Pseudo<(outs f.fprclass:$rd),
- (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
- Sched<[WriteVMovFS, ReadVMovFS]>,
- RISCVVPseudo;
- let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
- Constraints = "$rd = $rs1" in
- def "PseudoVFMV_S_" # f.FX # "_" # mx :
- Pseudo<(outs m.vrclass:$rd),
- (ins m.vrclass:$rs1, f.fprclass:$rs2,
- AVL:$vl, ixlenimm:$sew),
- []>,
- Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>,
- RISCVVPseudo;
- }
- }
+ let HasSEWOp = 1, BaseInstr = VFMV_F_S in
+ def "PseudoVFMV_" # f.FX # "_S" :
+ Pseudo<(outs f.fprclass:$rd),
+ (ins VR:$rs2, ixlenimm:$sew), []>,
+ Sched<[WriteVMovFS, ReadVMovFS]>,
+ RISCVVPseudo;
+ let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
+ Constraints = "$rd = $rs1" in
+ def "PseudoVFMV_S_" # f.FX :
+ Pseudo<(outs VR:$rd),
+ (ins VR:$rs1, f.fprclass:$rs2,
+ AVL:$vl, ixlenimm:$sew),
+ []>,
+ Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>,
+ RISCVVPseudo;
}
}
} // Predicates = [HasVInstructionsAnyF]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index cd4c3b6be19b5..cac3ca5157a44 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -1444,14 +1444,14 @@ foreach fvtiToFWti = AllWidenableBFloatToFloatVectors in {
//===----------------------------------------------------------------------===//
// Vector Element Extracts
//===----------------------------------------------------------------------===//
-foreach vti = AllFloatVectors in {
- defvar vmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
+foreach vti = NoGroupFloatVectors in {
+ defvar vfmv_f_s_inst = !cast<Instruction>(!strconcat("PseudoVFMV_",
vti.ScalarSuffix,
- "_S_", vti.LMul.MX));
+ "_S"));
// Only pattern-match extract-element operations where the index is 0. Any
// other index will have been custom-lowered to slide the vector correctly
// into place.
let Predicates = GetVTypePredicates<vti>.Predicates in
def : Pat<(vti.Scalar (extractelt (vti.Vector vti.RegClass:$rs2), 0)),
- (vmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
+ (vfmv_f_s_inst vti.RegClass:$rs2, vti.Log2SEW)>;
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 2ed71f6b88974..829cbc71d1843 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2934,18 +2934,16 @@ foreach vti = NoGroupFloatVectors in {
(vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
VLOpFrag)),
(PseudoVMV_S_X $merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
- }
-}
-
-foreach vti = AllFloatVectors in {
- let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
vti.ScalarRegClass:$rs1,
VLOpFrag)),
- (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix#"_"#vti.LMul.MX)
+ (!cast<Instruction>("PseudoVFMV_S_"#vti.ScalarSuffix)
vti.RegClass:$merge,
(vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
}
+}
+
+foreach vti = AllFloatVectors in {
defvar ivti = GetIntVTypeInfo<vti>.Vti;
let Predicates = GetVTypePredicates<ivti>.Predicates in {
def : Pat<(vti.Vector
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
index 3b7952f9f5e6d..209a37bf66ae3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-fp.ll
@@ -7,7 +7,7 @@
define half @extractelt_nxv1f16_0(<vscale x 1 x half> %v) {
; CHECK-LABEL: extractelt_nxv1f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x half> %v, i32 0
@@ -39,7 +39,7 @@ define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 zeroext %idx) {
define half @extractelt_nxv2f16_0(<vscale x 2 x half> %v) {
; CHECK-LABEL: extractelt_nxv2f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x half> %v, i32 0
@@ -199,7 +199,7 @@ define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 zeroext %idx)
define float @extractelt_nxv1f32_0(<vscale x 1 x float> %v) {
; CHECK-LABEL: extractelt_nxv1f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x float> %v, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
index 5252eb71c383d..f124d550df16d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -265,13 +265,13 @@ define i64 @bitcast_v1i64_i64(<1 x i64> %a) {
define half @bitcast_v2i8_f16(<2 x i8> %a) {
; CHECK-LABEL: bitcast_v2i8_f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
;
; ELEN32-LABEL: bitcast_v2i8_f16:
; ELEN32: # %bb.0:
-; ELEN32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT: vfmv.f.s fa0, v8
; ELEN32-NEXT: ret
%b = bitcast <2 x i8> %a to half
@@ -281,13 +281,13 @@ define half @bitcast_v2i8_f16(<2 x i8> %a) {
define half @bitcast_v1i16_f16(<1 x i16> %a) {
; CHECK-LABEL: bitcast_v1i16_f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
;
; ELEN32-LABEL: bitcast_v1i16_f16:
; ELEN32: # %bb.0:
-; ELEN32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT: vfmv.f.s fa0, v8
; ELEN32-NEXT: ret
%b = bitcast <1 x i16> %a to half
@@ -297,7 +297,7 @@ define half @bitcast_v1i16_f16(<1 x i16> %a) {
define float @bitcast_v4i8_f32(<4 x i8> %a) {
; CHECK-LABEL: bitcast_v4i8_f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
;
@@ -313,7 +313,7 @@ define float @bitcast_v4i8_f32(<4 x i8> %a) {
define float @bitcast_v2i16_f32(<2 x i16> %a) {
; CHECK-LABEL: bitcast_v2i16_f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
;
@@ -329,7 +329,7 @@ define float @bitcast_v2i16_f32(<2 x i16> %a) {
define float @bitcast_v1i32_f32(<1 x i32> %a) {
; CHECK-LABEL: bitcast_v1i32_f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
index 2fe08fc4c2129..5f5015c9ad169 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll
@@ -19,7 +19,7 @@ define i16 @bitcast_v1f16_i16(<1 x half> %a) {
define half @bitcast_v1f16_f16(<1 x half> %a) {
; CHECK-LABEL: bitcast_v1f16_f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%b = bitcast <1 x half> %a to half
@@ -49,7 +49,7 @@ define i32 @bitcast_v1f32_i32(<1 x float> %a) {
define float @bitcast_v2f16_f32(<2 x half> %a) {
; CHECK-LABEL: bitcast_v2f16_f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%b = bitcast <2 x half> %a to float
@@ -59,7 +59,7 @@ define float @bitcast_v2f16_f32(<2 x half> %a) {
define float @bitcast_v1f32_f32(<1 x float> %a) {
; CHECK-LABEL: bitcast_v1f32_f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%b = bitcast <1 x float> %a to float
@@ -237,7 +237,7 @@ define <1 x double> @bitcast_i64_v1f64(i64 %a) {
define <1 x i16> @bitcast_f16_v1i16(half %a) {
; CHECK-LABEL: bitcast_f16_v1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%b = bitcast half %a to <1 x i16>
@@ -247,7 +247,7 @@ define <1 x i16> @bitcast_f16_v1i16(half %a) {
define <1 x half> @bitcast_f16_v1f16(half %a) {
; CHECK-LABEL: bitcast_f16_v1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%b = bitcast half %a to <1 x half>
@@ -257,7 +257,7 @@ define <1 x half> @bitcast_f16_v1f16(half %a) {
define <2 x i16> @bitcast_f32_v2i16(float %a) {
; CHECK-LABEL: bitcast_f32_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%b = bitcast float %a to <2 x i16>
@@ -267,7 +267,7 @@ define <2 x i16> @bitcast_f32_v2i16(float %a) {
define <2 x half> @bitcast_f32_v2f16(float %a) {
; CHECK-LABEL: bitcast_f32_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%b = bitcast float %a to <2 x half>
@@ -277,7 +277,7 @@ define <2 x half> @bitcast_f32_v2f16(float %a) {
define <1 x i32> @bitcast_f32_v1i32(float %a) {
; CHECK-LABEL: bitcast_f32_v1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%b = bitcast float %a to <1 x i32>
@@ -287,7 +287,7 @@ define <1 x i32> @bitcast_f32_v1i32(float %a) {
define <1 x float> @bitcast_f32_v1f32(float %a) {
; CHECK-LABEL: bitcast_f32_v1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%b = bitcast float %a to <1 x float>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
index 2d3865ba4533d..901be442c0012 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
@@ -11,7 +11,7 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
@@ -23,10 +23,10 @@ define <1 x i64> @llrint_v1i64_v1f32(<1 x float> %x) {
;
; RV64-LABEL: llrint_v1i64_v1f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.s a0, fa5
-; RV64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vmv.s.x v8, a0
; RV64-NEXT: ret
%a = call <1 x i64> @llvm.llrint.v1i64.v1f32(<1 x float> %x)
@@ -47,7 +47,7 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x20, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 32 + 2 * vlenb
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
index de47d8572017b..a90ee3ebb8766 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
@@ -9,7 +9,7 @@
define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
; RV32-LABEL: lrint_v1f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.s a0, fa5
; RV32-NEXT: vmv.s.x v8, a0
@@ -17,7 +17,7 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
;
; RV64-i32-LABEL: lrint_v1f32:
; RV64-i32: # %bb.0:
-; RV64-i32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.s a0, fa5
; RV64-i32-NEXT: vmv.s.x v8, a0
@@ -25,10 +25,10 @@ define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
;
; RV64-i64-LABEL: lrint_v1f32:
; RV64-i64: # %bb.0:
-; RV64-i64-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-i64-NEXT: vfmv.f.s fa5, v8
; RV64-i64-NEXT: fcvt.l.s a0, fa5
-; RV64-i64-NEXT: vsetvli zero, zero, e64, m1, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i64-NEXT: vmv.s.x v8, a0
; RV64-i64-NEXT: ret
%a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
index 7adaaa05f9dd9..793e8eb5aee6a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
@@ -13,7 +13,7 @@ declare half @llvm.vp.reduce.fadd.v2f16(half, <2 x half>, <2 x i1>, i32)
define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vpreduce_fadd_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT: vfmv.s.f v9, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -39,7 +39,7 @@ define half @vpreduce_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroex
define half @vpreduce_ord_fadd_v2f16(half %s, <2 x half> %v, <2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vpreduce_ord_fadd_v2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT: vfmv.s.f v9, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -67,7 +67,7 @@ declare half @llvm.vp.reduce.fadd.v4f16(half, <4 x half>, <4 x i1>, i32)
define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vpreduce_fadd_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT: vfmv.s.f v9, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -93,7 +93,7 @@ define half @vpreduce_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroex
define half @vpreduce_ord_fadd_v4f16(half %s, <4 x half> %v, <4 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vpreduce_ord_fadd_v4f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT: vfmv.s.f v9, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -121,7 +121,7 @@ declare float @llvm.vp.reduce.fadd.v2f32(float, <2 x float>, <2 x i1>, i32)
define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_fadd_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -134,7 +134,7 @@ define float @vpreduce_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zer
define float @vpreduce_ord_fadd_v2f32(float %s, <2 x float> %v, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_ord_fadd_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
index fdb6bfe1baa77..4334f293d1e85 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfptoi-constrained-sdnode.ll
@@ -34,14 +34,14 @@ declare <1 x i7> @llvm.experimental.constrained.fptosi.v1i7.v1f16(<1 x half>, me
define <1 x i7> @vfptosi_v1f16_v1i7(<1 x half> %va) strictfp {
; RV32-LABEL: vfptosi_v1f16_v1i7:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.h a0, fa5, rtz
; RV32-NEXT: ret
;
; RV64-LABEL: vfptosi_v1f16_v1i7:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.h a0, fa5, rtz
; RV64-NEXT: ret
@@ -53,14 +53,14 @@ declare <1 x i7> @llvm.experimental.constrained.fptoui.v1i7.v1f16(<1 x half>, me
define <1 x i7> @vfptoui_v1f16_v1i7(<1 x half> %va) strictfp {
; RV32-LABEL: vfptoui_v1f16_v1i7:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.wu.h a0, fa5, rtz
; RV32-NEXT: ret
;
; RV64-LABEL: vfptoui_v1f16_v1i7:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.lu.h a0, fa5, rtz
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index 3e2db3fa4685d..1395dc914bb40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -2670,7 +2670,7 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
@@ -2803,7 +2803,7 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: snez a1, a1
@@ -2915,7 +2915,7 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, s1
@@ -5985,7 +5985,7 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
@@ -6111,7 +6111,7 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0xc0, 0x00, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 64 + 2 * vlenb
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: mv s0, a0
@@ -6219,7 +6219,7 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: mv s1, a1
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
index 060e99691cb13..8cfa88e6f9569 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insertelt-fp.ll
@@ -7,7 +7,7 @@
define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv1f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 0
@@ -29,7 +29,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %
; CHECK-LABEL: insertelt_nxv1f16_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli a2, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
@@ -41,7 +41,7 @@ define <vscale x 1 x half> @insertelt_nxv1f16_idx(<vscale x 1 x half> %v, half %
define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv2f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli a0, zero, e16, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 0
@@ -63,7 +63,7 @@ define <vscale x 2 x half> @insertelt_nxv2f16_idx(<vscale x 2 x half> %v, half %
; CHECK-LABEL: insertelt_nxv2f16_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli a2, zero, e16, mf2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
@@ -211,7 +211,7 @@ define <vscale x 32 x half> @insertelt_nxv32f16_idx(<vscale x 32 x half> %v, hal
define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 0
@@ -233,7 +233,7 @@ define <vscale x 1 x float> @insertelt_nxv1f32_idx(<vscale x 1 x float> %v, floa
; CHECK-LABEL: insertelt_nxv1f32_idx:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a1, a0, 1
-; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli a2, zero, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, ma
; CHECK-NEXT: vslideup.vx v8, v9, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
index 503085f01f812..ae8c36a7cb5e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/unmasked-ta.ll
@@ -928,7 +928,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, ha
define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(half %0, iXLen %1) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
index 47b4b61231122..af1c378c56812 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.f.s.ll
@@ -7,7 +7,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv1f16(<vscale x 1 x half>)
define half @intrinsic_vfmv.f.s_s_nxv1f16(<vscale x 1 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -20,7 +20,7 @@ declare half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half>)
define half @intrinsic_vfmv.f.s_s_nxv2f16(<vscale x 2 x half> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
@@ -85,7 +85,7 @@ declare float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float>)
define float @intrinsic_vfmv.f.s_s_nxv1f32(<vscale x 1 x float> %0) nounwind {
; CHECK-LABEL: intrinsic_vfmv.f.s_s_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
index b3aab2382370d..1e863a4adbc21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll
@@ -9,7 +9,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16(<vscale x 1 x half>, ha
define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
@@ -22,7 +22,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16(<vscale x 2 x half>, ha
define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
@@ -87,7 +87,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32(<vscale x 1 x float>,
define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index 3ccdb5dfa5e00..30e31cecbf2c7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -111,9 +111,9 @@ define float @vreduce_ord_fadd_nxv1f32(<vscale x 1 x float> %v, float %s) {
define float @vreduce_fwadd_nxv1f32(<vscale x 1 x half> %v, float %s) {
; CHECK-LABEL: vreduce_fwadd_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwredusum.vs v8, v8, v9
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
@@ -126,9 +126,9 @@ define float @vreduce_fwadd_nxv1f32(<vscale x 1 x half> %v, float %s) {
define float @vreduce_ord_fwadd_nxv1f32(<vscale x 1 x half> %v, float %s) {
; CHECK-LABEL: vreduce_ord_fwadd_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vfwredosum.vs v8, v8, v9
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vfmv.f.s fa0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
index f21b42e9519b6..7f2e3cdbfd0e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
@@ -13,7 +13,7 @@ declare half @llvm.vp.reduce.fadd.nxv1f16(half, <vscale x 1 x half>, <vscale x 1
define half @vpreduce_fadd_nxv1f16(half %s, <vscale x 1 x half> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vpreduce_fadd_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT: vfmv.s.f v9, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -39,7 +39,7 @@ define half @vpreduce_fadd_nxv1f16(half %s, <vscale x 1 x half> %v, <vscale x 1
define half @vpreduce_ord_fadd_nxv1f16(half %s, <vscale x 1 x half> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vpreduce_ord_fadd_nxv1f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT: vfmv.s.f v9, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; ZVFH-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -67,7 +67,7 @@ declare half @llvm.vp.reduce.fadd.nxv2f16(half, <vscale x 2 x half>, <vscale x 2
define half @vpreduce_fadd_nxv2f16(half %s, <vscale x 2 x half> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vpreduce_fadd_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT: vfmv.s.f v9, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -93,7 +93,7 @@ define half @vpreduce_fadd_nxv2f16(half %s, <vscale x 2 x half> %v, <vscale x 2
define half @vpreduce_ord_fadd_nxv2f16(half %s, <vscale x 2 x half> %v, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vpreduce_ord_fadd_nxv2f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ZVFH-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ZVFH-NEXT: vfmv.s.f v9, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; ZVFH-NEXT: vfredosum.vs v9, v8, v9, v0.t
@@ -389,7 +389,7 @@ declare float @llvm.vp.reduce.fadd.nxv1f32(float, <vscale x 1 x float>, <vscale
define float @vpreduce_fadd_nxv1f32(float %s, <vscale x 1 x float> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_fadd_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfredusum.vs v9, v8, v9, v0.t
@@ -402,7 +402,7 @@ define float @vpreduce_fadd_nxv1f32(float %s, <vscale x 1 x float> %v, <vscale x
define float @vpreduce_ord_fadd_nxv1f32(float %s, <vscale x 1 x float> %v, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_ord_fadd_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vfmv.s.f v9, fa0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfredosum.vs v9, v8, v9, v0.t
From f930535ba4e0acffe1f3f6eb664dde8c40412a9d Mon Sep 17 00:00:00 2001
From: Luke Lau <luke at igalia.com>
Date: Mon, 29 Jul 2024 19:10:08 +0800
Subject: [PATCH 2/2] Fix indentation
---
llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 6da4fa8106243..3f1fd62b1e143 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6790,10 +6790,9 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
Constraints = "$rd = $rs1" in
def "PseudoVFMV_S_" # f.FX :
- Pseudo<(outs VR:$rd),
- (ins VR:$rs1, f.fprclass:$rs2,
- AVL:$vl, ixlenimm:$sew),
- []>,
+ Pseudo<(outs VR:$rd),
+ (ins VR:$rs1, f.fprclass:$rs2, AVL:$vl, ixlenimm:$sew),
+ []>,
Sched<[WriteVMovSF, ReadVMovSF_V, ReadVMovSF_F]>,
RISCVVPseudo;
}