[llvm] [RISCV] Add missing lmul info for SiFive extensions (PR #76006)

via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 19 20:17:43 PST 2023


llvmbot wrote:



@llvm/pr-subscribers-backend-risc-v

Author: Brandon Wu (4vtomat)




---

Patch is 28.03 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/76006.diff


10 Files Affected:

- (modified) llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td (+2) 
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll (+8-8) 
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_2x8x2.ll (+6-6) 
- (modified) llvm/test/CodeGen/RISCV/rvv/sf_vqmaccus_4x8x4.ll (+6-6) 


``````````diff
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
index fa618b437ce779..a16fa7e769929e 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXSf.td
@@ -351,11 +351,13 @@ multiclass VPseudoSiFiveVMACC<string mx, VReg vd_type, VReg vs2_type,
 
 multiclass VPseudoSiFiveVQMACC<string Constraint = ""> {
   foreach m = MxListVF8 in
+    let VLMul = m.value in
     defm NAME : VPseudoSiFiveVMACC<m.MX, m.vrclass, m.vrclass, Constraint>;
 }
 
 multiclass VPseudoSiFiveVFWMACC<string Constraint = ""> {
   foreach m = MxListFW in
+    let VLMul = m.value in
     defm NAME : VPseudoSiFiveVMACC<m.MX, m.wvrclass, m.vrclass, Constraint>;
 }
 
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll
index 180155139b57b2..e0da3e846759f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vfwmacc_4x4x4.ll
@@ -13,7 +13,7 @@ declare <vscale x 1 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv1f32.nxv4bf16.nxv1b
 define <vscale x 1 x float> @intrinsic_vfwmacc_4x4x4_tu_f32mf2(<vscale x 1 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -29,7 +29,7 @@ entry:
 define <vscale x 1 x float> @intrinsic_vfwmacc_4x4x4_ta_f32mf2(<vscale x 1 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 1 x bfloat> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_ta_f32mf2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -51,7 +51,7 @@ declare <vscale x 2 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv2f32.nxv4bf16.nxv2b
 define <vscale x 2 x float> @intrinsic_vfwmacc_4x4x4_tu_f32m1(<vscale x 2 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -67,7 +67,7 @@ entry:
 define <vscale x 2 x float> @intrinsic_vfwmacc_4x4x4_ta_f32m1(<vscale x 2 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 2 x bfloat> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_ta_f32m1:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v9, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -127,7 +127,7 @@ declare <vscale x 8 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv8f32.nxv4bf16.nxv8b
 define <vscale x 8 x float> @intrinsic_vfwmacc_4x4x4_tu_f32m4(<vscale x 8 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v12, v14
 ; CHECK-NEXT:    ret
 entry:
@@ -143,7 +143,7 @@ entry:
 define <vscale x 8 x float> @intrinsic_vfwmacc_4x4x4_ta_f32m4(<vscale x 8 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 8 x bfloat> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_ta_f32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v12, v14
 ; CHECK-NEXT:    ret
 entry:
@@ -165,7 +165,7 @@ declare <vscale x 16 x float> @llvm.riscv.sf.vfwmacc.4x4x4.nxv16f32.nxv4bf16.nxv
 define <vscale x 16 x float> @intrinsic_vfwmacc_4x4x4_tu_f32m8(<vscale x 16 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_tu_f32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v16, v20
 ; CHECK-NEXT:    ret
 entry:
@@ -181,7 +181,7 @@ entry:
 define <vscale x 16 x float> @intrinsic_vfwmacc_4x4x4_ta_f32m8(<vscale x 16 x float> %0, <vscale x 4 x bfloat> %1, <vscale x 16 x bfloat> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_4x4x4_ta_f32m8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    sf.vfwmacc.4x4x4 v8, v16, v20
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll
index 0a3623c236486c..25256f7914931c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_2x8x2.ll
@@ -51,7 +51,7 @@ declare <vscale x 4 x i32> @llvm.riscv.sf.vqmacc.2x8x2.nxv4i32.nxv8i8.nxv16i8(
 define <vscale x 4 x i32> @intrinsic_vqmacc_2x8x2_tu_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    sf.vqmacc.2x8x2 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -67,7 +67,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vqmacc_2x8x2_ta_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_ta_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    sf.vqmacc.2x8x2 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -89,7 +89,7 @@ declare <vscale x 8 x i32> @llvm.riscv.sf.vqmacc.2x8x2.nxv8i32.nxv8i8.nxv32i8(
 define <vscale x 8 x i32> @intrinsic_vqmacc_2x8x2_tu_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    sf.vqmacc.2x8x2 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -105,7 +105,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vqmacc_2x8x2_ta_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_ta_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    sf.vqmacc.2x8x2 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -128,7 +128,7 @@ define <vscale x 16 x i32> @intrinsic_vqmacc_2x8x2_tu_i32m8(<vscale x 16 x i32>
 ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_tu_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
 ; CHECK-NEXT:    sf.vqmacc.2x8x2 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
@@ -145,7 +145,7 @@ define <vscale x 16 x i32> @intrinsic_vqmacc_2x8x2_ta_i32m8(<vscale x 16 x i32>
 ; CHECK-LABEL: intrinsic_vqmacc_2x8x2_ta_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    sf.vqmacc.2x8x2 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll
index 843e4bda4d123f..2d591be2adc21c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmacc_4x8x4.ll
@@ -51,7 +51,7 @@ declare <vscale x 4 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv4i32.nxv8i8.nxv16i8(
 define <vscale x 4 x i32> @intrinsic_vqmacc_4x8x4_tu_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -67,7 +67,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vqmacc_4x8x4_ta_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_ta_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -89,7 +89,7 @@ declare <vscale x 8 x i32> @llvm.riscv.sf.vqmacc.4x8x4.nxv8i32.nxv8i8.nxv32i8(
 define <vscale x 8 x i32> @intrinsic_vqmacc_4x8x4_tu_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -105,7 +105,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vqmacc_4x8x4_ta_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_ta_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -128,7 +128,7 @@ define <vscale x 16 x i32> @intrinsic_vqmacc_4x8x4_tu_i32m8(<vscale x 16 x i32>
 ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_tu_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
 ; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
@@ -145,7 +145,7 @@ define <vscale x 16 x i32> @intrinsic_vqmacc_4x8x4_ta_i32m8(<vscale x 16 x i32>
 ; CHECK-LABEL: intrinsic_vqmacc_4x8x4_ta_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    sf.vqmacc.4x8x4 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll
index 106d3183991c79..8d61901107931f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_2x8x2.ll
@@ -51,7 +51,7 @@ declare <vscale x 4 x i32> @llvm.riscv.sf.vqmaccsu.2x8x2.nxv4i32.nxv8i8.nxv16i8(
 define <vscale x 4 x i32> @intrinsic_vqmaccsu_2x8x2_tu_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    sf.vqmaccsu.2x8x2 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -67,7 +67,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vqmaccsu_2x8x2_ta_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_ta_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    sf.vqmaccsu.2x8x2 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -89,7 +89,7 @@ declare <vscale x 8 x i32> @llvm.riscv.sf.vqmaccsu.2x8x2.nxv8i32.nxv8i8.nxv32i8(
 define <vscale x 8 x i32> @intrinsic_vqmaccsu_2x8x2_tu_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    sf.vqmaccsu.2x8x2 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -105,7 +105,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vqmaccsu_2x8x2_ta_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_ta_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    sf.vqmaccsu.2x8x2 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -128,7 +128,7 @@ define <vscale x 16 x i32> @intrinsic_vqmaccsu_2x8x2_tu_i32m8(<vscale x 16 x i32
 ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_tu_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
 ; CHECK-NEXT:    sf.vqmaccsu.2x8x2 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
@@ -145,7 +145,7 @@ define <vscale x 16 x i32> @intrinsic_vqmaccsu_2x8x2_ta_i32m8(<vscale x 16 x i32
 ; CHECK-LABEL: intrinsic_vqmaccsu_2x8x2_ta_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    sf.vqmaccsu.2x8x2 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll
index 45a3b22d9618a5..bfdab33965c131 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccsu_4x8x4.ll
@@ -51,7 +51,7 @@ declare <vscale x 4 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv4i32.nxv8i8.nxv16i8(
 define <vscale x 4 x i32> @intrinsic_vqmaccsu_4x8x4_tu_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_tu_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    sf.vqmaccsu.4x8x4 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -67,7 +67,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vqmaccsu_4x8x4_ta_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_ta_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    sf.vqmaccsu.4x8x4 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -89,7 +89,7 @@ declare <vscale x 8 x i32> @llvm.riscv.sf.vqmaccsu.4x8x4.nxv8i32.nxv8i8.nxv32i8(
 define <vscale x 8 x i32> @intrinsic_vqmaccsu_4x8x4_tu_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_tu_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    sf.vqmaccsu.4x8x4 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -105,7 +105,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vqmaccsu_4x8x4_ta_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_ta_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    sf.vqmaccsu.4x8x4 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -128,7 +128,7 @@ define <vscale x 16 x i32> @intrinsic_vqmaccsu_4x8x4_tu_i32m8(<vscale x 16 x i32
 ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_tu_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
 ; CHECK-NEXT:    sf.vqmaccsu.4x8x4 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
@@ -145,7 +145,7 @@ define <vscale x 16 x i32> @intrinsic_vqmaccsu_4x8x4_ta_i32m8(<vscale x 16 x i32
 ; CHECK-LABEL: intrinsic_vqmaccsu_4x8x4_ta_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    sf.vqmaccsu.4x8x4 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll
index d9ab4559eac690..6667a89052e9c6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_2x8x2.ll
@@ -51,7 +51,7 @@ declare <vscale x 4 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv4i32.nxv8i8.nxv16i8(
 define <vscale x 4 x i32> @intrinsic_vqmaccu_2x8x2_tu_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -67,7 +67,7 @@ entry:
 define <vscale x 4 x i32> @intrinsic_vqmaccu_2x8x2_ta_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_ta_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
 ; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v10, v12
 ; CHECK-NEXT:    ret
 entry:
@@ -89,7 +89,7 @@ declare <vscale x 8 x i32> @llvm.riscv.sf.vqmaccu.2x8x2.nxv8i32.nxv8i8.nxv32i8(
 define <vscale x 8 x i32> @intrinsic_vqmaccu_2x8x2_tu_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -105,7 +105,7 @@ entry:
 define <vscale x 8 x i32> @intrinsic_vqmaccu_2x8x2_ta_i32m4(<vscale x 8 x i32> %0, <vscale x 8 x i8> %1, <vscale x 32 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_ta_i32m4:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
 ; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v12, v16
 ; CHECK-NEXT:    ret
 entry:
@@ -128,7 +128,7 @@ define <vscale x 16 x i32> @intrinsic_vqmaccu_2x8x2_tu_i32m8(<vscale x 16 x i32>
 ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_tu_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
 ; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
@@ -145,7 +145,7 @@ define <vscale x 16 x i32> @intrinsic_vqmaccu_2x8x2_ta_i32m8(<vscale x 16 x i32>
 ; CHECK-LABEL: intrinsic_vqmaccu_2x8x2_ta_i32m8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vl8r.v v24, (a0)
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
 ; CHECK-NEXT:    sf.vqmaccu.2x8x2 v8, v16, v24
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll
index 9d15ab68a091a1..d1565fb9a634f1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sf_vqmaccu_4x8x4.ll
@@ -51,7 +51,7 @@ declare <vscale x 4 x i32> @llvm.riscv.sf.vqmaccu.4x8x4.nxv4i32.nxv8i8.nxv16i8(
 define <vscale x 4 x i32> @intrinsic_vqmaccu_4x8x4_tu_i32m2(<vscale x 4 x i32> %0, <vscale x 8 x i8> %1, <vscale x 16 x i8> %2, iXLen %3) nounwind {
 ; CHECK-LABEL: intrinsic_vqmaccu_4x8x4_tu_i32m2:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    sf.vqmaccu....
[truncated]

``````````
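For context, a minimal sketch of the mechanism being fixed (an illustrative excerpt, not a drop-in definition): the vsetvli insertion pass derives the LMUL it emits from the LMUL recorded on each vector pseudo, so the SiFive multiclasses need to propagate it from the Mx list entry. The block below mirrors the shape of the patched VPseudoSiFiveVQMACC multiclass from RISCVInstrInfoXSf.td; VPseudoSiFiveVMACC, MxListVF8, and the m.* fields come from the surrounding RISC-V backend .td files and are not redefined here.

```
// Sketch only: mirrors VPseudoSiFiveVQMACC after this patch (see the
// diff above). The referenced multiclasses and Mx lists live elsewhere
// in the RISC-V backend TableGen files.
multiclass VPseudoSiFiveVQMACC<string Constraint = ""> {
  foreach m = MxListVF8 in
    // Before this change the generated pseudos did not record their
    // LMUL, and the updated tests checked `m1` for every vsetvli.
    // Recording m.value on each pseudo lets the vsetvli insertion pass
    // emit the LMUL matching the operand register class (mf4/mf2/m2/
    // m4/m8 in the new CHECK lines).
    let VLMul = m.value in
    defm NAME : VPseudoSiFiveVMACC<m.MX, m.vrclass, m.vrclass, Constraint>;
}
```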



https://github.com/llvm/llvm-project/pull/76006

