[llvm] [RISCV] Remove vmv.s.x and vmv.x.s lmul pseudo variants (PR #71501)
via llvm-commits
llvm-commits at lists.llvm.org
Tue Nov 7 00:49:22 PST 2023
llvmbot wrote:
@llvm/pr-subscribers-backend-risc-v
Author: Luke Lau (lukel97)
Changes:
vmv.s.x and vmv.x.s ignore LMUL, so we can replace the PseudoVMV_S_X_MX and
PseudoVMV_X_S_MX variants with just one pseudo each. These pseudos use the VR
register class (just like the actual instructions), so for the tablegen
patterns we need to wrap the LMUL>1 types in subregister inserts/extracts.
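As a minimal sketch (hypothetical register and vtype choices) of why the instruction itself is LMUL-agnostic: vmv.x.s always reads element 0 of the first register in the group, whatever LMUL is in effect.

```
# vmv.x.s reads element 0 of vs2 regardless of the LMUL in vtype:
vsetivli zero, 1, e32, m4, ta, ma   # LMUL=4: v8 names the group v8-v11
vmv.x.s  a0, v8                     # reads only v8[0]
vsetivli zero, 1, e32, m1, ta, ma   # LMUL=1: v8 alone
vmv.x.s  a0, v8                     # same result
```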
The test diff is due to the fact that a PseudoVMV_S_X/PseudoVMV_X_S no longer
carries any information about LMUL, so if it is the only vector pseudo
instruction in a block it now defaults to LMUL=1.
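For example (this is the pattern throughout the extractelt test changes below), a block whose only vector operation is a scalar move used to get a vsetvli with the fractional LMUL baked into the pseudo, and now gets m1:

```
# Before: LMUL came from the pseudo variant (PseudoVMV_X_S_MF8 here)
vsetivli zero, 1, e8, mf8, ta, ma
vmv.x.s  a0, v8
# After: the pseudo carries no LMUL, so the block defaults to m1
vsetivli zero, 1, e8, m1, ta, ma
vmv.x.s  a0, v8
```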
---
Patch is 328.98 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/71501.diff
32 Files Affected:
- (modified) llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp (+3-1)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td (+26-21)
- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td (+24-7)
- (modified) llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll (+13-13)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll (+8-8)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-bitcast.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll (+2-2)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll (+85-85)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll (+4-4)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll (+42-42)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll (+365-501)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll (+156-156)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll (+47-47)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-vslide1up.ll (+1-1)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-store-asm.ll (+46-94)
- (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll (+25-25)
- (modified) llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv32.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/insertelt-int-rv64.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/vfmv.s.f.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv32.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmv.s.x-rv64.ll (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv32.ll (+15-15)
- (modified) llvm/test/CodeGen/RISCV/rvv/vmv.x.s-rv64.ll (+18-18)
- (modified) llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll (+55-55)
- (modified) llvm/test/CodeGen/RISCV/rvv/vreductions-int.ll (+12-12)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir (+6-6)
- (modified) llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.mir (+6-6)
``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 51a235bf2ca1861..5e0adf0ba72eeeb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -3362,7 +3362,9 @@ static bool usesAllOnesMask(SDNode *N, unsigned MaskOpIdx) {
static bool isImplicitDef(SDValue V) {
return V.isMachineOpcode() &&
- V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
+ (V.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF ||
+ (V.getMachineOpcode() == TargetOpcode::EXTRACT_SUBREG &&
+ isImplicitDef(V.getOperand(0))));
}
// Optimize masked RVV pseudo instructions with a known all-ones mask to their
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index bec67153b6543d4..9e4692843430837 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -6753,24 +6753,19 @@ defm PseudoVID : VPseudoVID_V;
let Predicates = [HasVInstructions] in {
let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
- foreach m = MxList in {
- defvar mx = m.MX;
- let VLMul = m.value in {
- let HasSEWOp = 1, BaseInstr = VMV_X_S in
- def PseudoVMV_X_S # "_" # mx:
- Pseudo<(outs GPR:$rd), (ins m.vrclass:$rs2, ixlenimm:$sew), []>,
- Sched<[WriteVIMovVX, ReadVIMovVX]>,
- RISCVVPseudo;
- let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
- Constraints = "$rd = $rs1" in
- def PseudoVMV_S_X # "_" # mx: Pseudo<(outs m.vrclass:$rd),
- (ins m.vrclass:$rs1, GPR:$rs2,
- AVL:$vl, ixlenimm:$sew),
- []>,
- Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>,
- RISCVVPseudo;
- }
- }
+ let HasSEWOp = 1, BaseInstr = VMV_X_S in
+ def PseudoVMV_X_S:
+ Pseudo<(outs GPR:$rd), (ins VR:$rs2, ixlenimm:$sew), []>,
+ Sched<[WriteVIMovVX, ReadVIMovVX]>,
+ RISCVVPseudo;
+ let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
+ Constraints = "$rd = $rs1" in
+ def PseudoVMV_S_X: Pseudo<(outs VR:$rd),
+ (ins VR:$rs1, GPR:$rs2,
+ AVL:$vl, ixlenimm:$sew),
+ []>,
+ Sched<[WriteVIMovXV, ReadVIMovXV, ReadVIMovXX]>,
+ RISCVVPseudo;
}
} // Predicates = [HasVInstructions]
@@ -7400,7 +7395,12 @@ defm : VPatNullaryV<"int_riscv_vid", "PseudoVID">;
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in
def : Pat<(XLenVT (riscv_vmv_x_s (vti.Vector vti.RegClass:$rs2))),
- (!cast<Instruction>("PseudoVMV_X_S_" # vti.LMul.MX) $rs2, vti.Log2SEW)>;
+ (PseudoVMV_X_S
+ !if(!isa<GroupVTypeInfo>(vti),
+ (!cast<GroupVTypeInfo>(vti).VectorM1
+ (EXTRACT_SUBREG $rs2, sub_vrm1_0)),
+ (vti.Vector $rs2)),
+ vti.Log2SEW)>;
// vmv.s.x is handled with a custom node in RISCVInstrInfoVVLPatterns.td
}
@@ -7418,10 +7418,15 @@ foreach fvti = AllFloatVectors in {
(fvti.Scalar fvti.ScalarRegClass:$rs2),
GPR:$vl, fvti.Log2SEW)>;
+ defvar is_group = !isa<GroupVTypeInfo>(fvti);
+ defvar merge = !if(is_group,
+ (!cast<GroupVTypeInfo>(fvti).VectorM1
+ (EXTRACT_SUBREG $rs1, sub_vrm1_0)),
+ (fvti.Vector $rs1));
+ defvar vmv_s_x = (PseudoVMV_S_X merge, (XLenVT X0), GPR:$vl, fvti.Log2SEW);
def : Pat<(fvti.Vector (int_riscv_vfmv_s_f (fvti.Vector fvti.RegClass:$rs1),
(fvti.Scalar (fpimm0)), VLOpFrag)),
- (!cast<Instruction>("PseudoVMV_S_X_" # fvti.LMul.MX)
- (fvti.Vector $rs1), (XLenVT X0), GPR:$vl, fvti.Log2SEW)>;
+ !if(is_group, (INSERT_SUBREG $rs1, vmv_s_x, sub_vrm1_0), vmv_s_x)>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index d92d3975d12f533..f510d3369dd5acc 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -2825,12 +2825,20 @@ foreach mti = AllMasks in {
// 16.4. Vector Register Gather Instruction
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
+ defvar is_group = !isa<GroupVTypeInfo>(vti);
+ defvar merge = !if(is_group,
+ (!cast<GroupVTypeInfo>(vti).VectorM1
+ (EXTRACT_SUBREG $merge, sub_vrm1_0)),
+ (vti.Vector $merge));
+ defvar vmv_s_x = (PseudoVMV_S_X merge,
+ (vti.Scalar vti.ScalarRegClass:$rs1),
+ GPR:$vl, vti.Log2SEW);
def : Pat<(vti.Vector (riscv_vmv_s_x_vl (vti.Vector vti.RegClass:$merge),
vti.ScalarRegClass:$rs1,
VLOpFrag)),
- (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
- vti.RegClass:$merge,
- (vti.Scalar vti.ScalarRegClass:$rs1), GPR:$vl, vti.Log2SEW)>;
+ !if(is_group, (INSERT_SUBREG $merge, vmv_s_x, sub_vrm1_0),
+ vmv_s_x)>;
+
def : Pat<(vti.Vector (riscv_vrgather_vv_vl vti.RegClass:$rs2,
vti.RegClass:$rs1,
@@ -2881,16 +2889,25 @@ foreach vti = AllIntegerVectors in {
// 16.2. Floating-Point Scalar Move Instructions
foreach vti = AllFloatVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
+ defvar is_group = !isa<GroupVTypeInfo>(vti);
+ defvar merge = !if(is_group,
+ (!cast<GroupVTypeInfo>(vti).VectorM1
+ (EXTRACT_SUBREG $merge, sub_vrm1_0)),
+ (vti.Vector $merge));
+ defvar vmv_s_x_x0 = (PseudoVMV_S_X merge, (XLenVT X0), GPR:$vl, vti.Log2SEW);
def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
(vti.Scalar (fpimm0)),
VLOpFrag)),
- (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
- vti.RegClass:$merge, (XLenVT X0), GPR:$vl, vti.Log2SEW)>;
+ !if(is_group, (INSERT_SUBREG $merge, vmv_s_x_x0, sub_vrm1_0),
+ vmv_s_x_x0)>;
+
+ defvar vmv_s_x = (PseudoVMV_S_X merge, GPR:$imm, GPR:$vl, vti.Log2SEW);
def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
(vti.Scalar (SelectFPImm (XLenVT GPR:$imm))),
VLOpFrag)),
- (!cast<Instruction>("PseudoVMV_S_X_"#vti.LMul.MX)
- vti.RegClass:$merge, GPR:$imm, GPR:$vl, vti.Log2SEW)>;
+ !if(is_group, (INSERT_SUBREG $merge, vmv_s_x, sub_vrm1_0),
+ vmv_s_x)>;
+
def : Pat<(vti.Vector (riscv_vfmv_s_f_vl (vti.Vector vti.RegClass:$merge),
vti.ScalarRegClass:$rs1,
VLOpFrag)),
diff --git a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
index 8fb4be6b49ed648..600084632ce68a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/emergency-slot.mir
@@ -139,7 +139,7 @@ body: |
; CHECK-NEXT: renamable $v0 = VL1RE8_V killed $x10 :: (load unknown-size from %stack.1, align 8)
; CHECK-NEXT: $x10 = LD $x2, 8 :: (load (s64) from %stack.15)
; CHECK-NEXT: renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3 /* e8 */, 1 /* ta, mu */, implicit $vl, implicit $vtype
- ; CHECK-NEXT: renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3 /* e8 */, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: renamable $x13 = PseudoVMV_X_S killed renamable $v0, 3 /* e8 */, implicit $vl, implicit $vtype
; CHECK-NEXT: BLT killed renamable $x16, renamable $x27, %bb.2
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: bb.1:
@@ -206,7 +206,7 @@ body: |
renamable $x13 = nsw ADDI renamable $x16, -2
renamable $v0 = VL1RE8_V %stack.1 :: (load unknown-size from %stack.1, align 8)
renamable $v0 = PseudoVSLIDEDOWN_VX_M1 undef renamable $v0, killed renamable $v0, killed renamable $x13, $noreg, 3, 1, implicit $vl, implicit $vtype
- renamable $x13 = PseudoVMV_X_S_M1 killed renamable $v0, 3, implicit $vl, implicit $vtype
+ renamable $x13 = PseudoVMV_X_S killed renamable $v0, 3, implicit $vl, implicit $vtype
BLT killed renamable $x16, renamable $x27, %bb.2
bb.1:
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
index fd2f89e26e59809..d9fdec3041cb065 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv32.ll
@@ -8,7 +8,7 @@
define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
; CHECK-LABEL: extractelt_nxv1i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 0
@@ -40,7 +40,7 @@ define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
; CHECK-LABEL: extractelt_nxv2i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 0
@@ -72,7 +72,7 @@ define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 %idx) {
define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
; CHECK-LABEL: extractelt_nxv4i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 0
@@ -232,7 +232,7 @@ define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 %idx) {
define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
; CHECK-LABEL: extractelt_nxv1i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 0
@@ -264,7 +264,7 @@ define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 %idx) {
define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
; CHECK-LABEL: extractelt_nxv2i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 0
@@ -424,7 +424,7 @@ define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 %idx) {
define i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
; CHECK-LABEL: extractelt_nxv1i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i32> %v, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
index 34dcce3fe058bc9..c1a95ce1ae8143c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-int-rv64.ll
@@ -7,7 +7,7 @@
define signext i8 @extractelt_nxv1i8_0(<vscale x 1 x i8> %v) {
; CHECK-LABEL: extractelt_nxv1i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i8> %v, i32 0
@@ -39,7 +39,7 @@ define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 zeroext %idx)
define signext i8 @extractelt_nxv2i8_0(<vscale x 2 x i8> %v) {
; CHECK-LABEL: extractelt_nxv2i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i8> %v, i32 0
@@ -71,7 +71,7 @@ define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 zeroext %idx)
define signext i8 @extractelt_nxv4i8_0(<vscale x 4 x i8> %v) {
; CHECK-LABEL: extractelt_nxv4i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 4 x i8> %v, i32 0
@@ -231,7 +231,7 @@ define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 zeroext %id
define signext i16 @extractelt_nxv1i16_0(<vscale x 1 x i16> %v) {
; CHECK-LABEL: extractelt_nxv1i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i16> %v, i32 0
@@ -263,7 +263,7 @@ define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 zeroext %i
define signext i16 @extractelt_nxv2i16_0(<vscale x 2 x i16> %v) {
; CHECK-LABEL: extractelt_nxv2i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 2 x i16> %v, i32 0
@@ -423,7 +423,7 @@ define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 zeroext
define signext i32 @extractelt_nxv1i32_0(<vscale x 1 x i32> %v) {
; CHECK-LABEL: extractelt_nxv1i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%r = extractelement <vscale x 1 x i32> %v, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
index 47edb9eecb00bc1..e3ee5b54acafb22 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitcast.ll
@@ -32,13 +32,13 @@ define <32 x i1> @bitcast_v4i8_v32i1(<4 x i8> %a, <32 x i1> %b) {
define i8 @bitcast_v1i8_i8(<1 x i8> %a) {
; CHECK-LABEL: bitcast_v1i8_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
;
; ELEN32-LABEL: bitcast_v1i8_i8:
; ELEN32: # %bb.0:
-; ELEN32-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; ELEN32-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; ELEN32-NEXT: vmv.x.s a0, v8
; ELEN32-NEXT: ret
%b = bitcast <1 x i8> %a to i8
@@ -48,13 +48,13 @@ define i8 @bitcast_v1i8_i8(<1 x i8> %a) {
define i16 @bitcast_v2i8_i16(<2 x i8> %a) {
; CHECK-LABEL: bitcast_v2i8_i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
;
; ELEN32-LABEL: bitcast_v2i8_i16:
; ELEN32: # %bb.0:
-; ELEN32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT: vmv.x.s a0, v8
; ELEN32-NEXT: ret
%b = bitcast <2 x i8> %a to i16
@@ -64,13 +64,13 @@ define i16 @bitcast_v2i8_i16(<2 x i8> %a) {
define i16 @bitcast_v1i16_i16(<1 x i16> %a) {
; CHECK-LABEL: bitcast_v1i16_i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
;
; ELEN32-LABEL: bitcast_v1i16_i16:
; ELEN32: # %bb.0:
-; ELEN32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT: vmv.x.s a0, v8
; ELEN32-NEXT: ret
%b = bitcast <1 x i16> %a to i16
@@ -80,7 +80,7 @@ define i16 @bitcast_v1i16_i16(<1 x i16> %a) {
define i32 @bitcast_v4i8_i32(<4 x i8> %a) {
; CHECK-LABEL: bitcast_v4i8_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
;
@@ -96,7 +96,7 @@ define i32 @bitcast_v4i8_i32(<4 x i8> %a) {
define i32 @bitcast_v2i16_i32(<2 x i16> %a) {
; CHECK-LABEL: bitcast_v2i16_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
;
@@ -112,7 +112,7 @@ define i32 @bitcast_v2i16_i32(<2 x i16> %a) {
define i32 @bitcast_v1i32_i32(<1 x i32> %a) {
; CHECK-LABEL: bitcast_v1i32_i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
;
@@ -433,13 +433,13 @@ define double @bitcast_v1i64_f64(<1 x i64> %a) {
define <1 x i16> @bitcast_i16_v1i16(i16 %a) {
; CHECK-LABEL: bitcast_i16_v1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
;
; ELEN32-LABEL: bitcast_i16_v1i16:
; ELEN32: # %bb.0:
-; ELEN32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; ELEN32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; ELEN32-NEXT: vmv.s.x v8, a0
; ELEN32-NEXT: ret
%b = bitcast i16 %a to <1 x i16>
@@ -449,7 +449,7 @@ define <1 x i16> @bitcast_i16_v1i16(i16 %a) {
define <2 x i16> @bitcast_i32_v2i16(i32 %a) {
; CHECK-LABEL: bitcast_i32_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
;
@@ -465,7 +465,7 @@ define <2 x i16> @bitcast_i32_v2i16(i32 %a) {
define <1 x i32> @bitcast_i32_v1i32(i32 %a) {
; CHECK-LABEL: bitcast_i32_v1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
index 9d689c732d7999f..c71b2e687e5dff0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -106,7 +106,7 @@ define i1 @extractelt_v16i1(ptr %x, i64 %idx) nounwind {
; RV32-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT: vle8.v v8, (a0)
; RV32-NEXT: vmseq.vi v8, v8, 0
-; RV32-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: srl a0, a0, a1
; RV32-NEXT: andi a0, a0, 1
@@ -117,7 +117,7 @@ define i1 @extractelt_v16i1(ptr %x, i64 %idx) nounwind {
; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV64-NEXT: vle8.v v8, (a0)
; RV64-NEXT: vmseq.vi v8, v8, 0
-; RV64-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: srl a0, a0, a1
; RV64-NEXT: andi a0, a0, 1
@@ -128,7 +128,7 @@ define i1 @extractelt_v16i1(ptr %x, i64 %idx) nounwind {
; RV32ZBS-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; RV32ZBS-NEXT: vle8.v v8, (a0)
; RV32ZBS-NEXT: vmseq.vi v8, v8, 0
-; RV32ZBS-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; RV32ZBS-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV32ZBS-NEXT: vmv.x.s a0, v8
; RV32ZBS-NEXT: bext a0, a0, a1
; RV32ZB...
[truncated]
``````````
https://github.com/llvm/llvm-project/pull/71501