[llvm] [RISCV] Use per-SEW immediate inversion for vrol intrinsic patterns (PR #190113)

Jim Lin via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 1 22:59:38 PDT 2026


https://github.com/tclin914 created https://github.com/llvm/llvm-project/pull/190113

The VPatBinaryV_VI_VROL multiclass used InvRot64Imm for every SEW width when converting vrol immediate intrinsics to vror.vi. This produced unnecessarily large immediates for narrower element types (e.g., 61 instead of 5 for an SEW=8 rotate-left by 3). The results were still correct, since only the low log2(SEW) bits of the rotate amount are used, but the immediates were not the canonical ones.

Use the per-SEW InvRot{SEW}Imm transform instead, matching what the SDNode patterns already do.
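
For reference, the identity behind the inversion: a rotate-left by n on an
SEW-bit element equals a rotate-right by (SEW - n) % SEW, and the old
InvRot64Imm value 64 - n only agrees with that modulo SEW because SEW
divides 64. A minimal standalone C++20 sketch (using std::rotl/std::rotr
from <bit>; the constants mirror the SEW=8 rotate-left-by-3 example above):

  #include <bit>
  #include <cassert>
  #include <cstdint>

  int main() {
    const uint8_t x = 0xA5;
    // Per-SEW inverse: rotl by 3 == rotr by 8 - 3 = 5.
    assert(std::rotl(x, 3) == std::rotr(x, 5));
    // The InvRot64Imm value 61 = 64 - 3 also rotates correctly, but
    // only because hardware uses just the low log2(SEW) bits of the
    // rotate amount: 61 % 8 == 5.
    assert(std::rotl(x, 3) == std::rotr(x, 61 % 8));
    return 0;
  }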

From 9ed987e6ce8612c29b4daf1a58d56632f722f8f8 Mon Sep 17 00:00:00 2001
From: Jim Lin <jim at andestech.com>
Date: Thu, 2 Apr 2026 08:55:21 +0800
Subject: [PATCH] [RISCV] Use per-SEW immediate inversion for vrol intrinsic
 patterns

The VPatBinaryV_VI_VROL multiclass was using InvRot64Imm for all SEW
widths when converting vrol immediate intrinsics to vror.vi. This
produced unnecessarily large immediates for narrower element types
(e.g., 61 instead of 5 for SEW=8 rotate-left by 3).

Use the appropriate InvRot{SEW}Imm transform to match what the SDNode
patterns already do.

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply at anthropic.com>
---
 llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td |  5 +-
 llvm/test/CodeGen/RISCV/rvv/vrol.ll        | 72 +++++++++++-----------
 2 files changed, 39 insertions(+), 38 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
index fd9e6f282e60b..cb84c1ff3c163 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvk.td
@@ -1047,6 +1047,7 @@ multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
     defvar Pseudo = !cast<Instruction>(
         !if(isSEWAware, instruction#"_VI_"#vti.LMul.MX#"_E"#vti.SEW,
                         instruction#"_VI_"#vti.LMul.MX));
+    defvar InvRotImm = !cast<SDNodeXForm>("InvRot" # vti.SEW # "Imm");
     let Predicates = !listconcat(ExtraPreds, GetVTypePredicates<vti>.Predicates) in
     def : Pat<(vti.Vector (Intr (vti.Vector vti.RegClass:$passthru),
                           (vti.Vector vti.RegClass:$rs2),
@@ -1054,7 +1055,7 @@ multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
                           VLOpFrag)),
                           (Pseudo (vti.Vector vti.RegClass:$passthru),
                           (vti.Vector vti.RegClass:$rs2),
-                          (InvRot64Imm uimm6:$rs1),
+                          (InvRotImm uimm6:$rs1),
                           GPR:$vl, vti.Log2SEW, TU_MU)>;
 
     defvar IntrMask = !cast<Intrinsic>(intrinsic#"_mask");
@@ -1069,7 +1070,7 @@ multiclass VPatBinaryV_VI_VROL<string intrinsic, string instruction,
                           VLOpFrag, (XLenVT timm:$policy))),
                           (PseudoMask (vti.Vector vti.RegClass:$passthru),
                           (vti.Vector vti.RegClass:$rs2),
-                          (InvRot64Imm uimm6:$rs1),
+                          (InvRotImm uimm6:$rs1),
                           (vti.Mask VMV0:$vm),
                           GPR:$vl, vti.Log2SEW, (XLenVT timm:$policy))>;
     }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrol.ll b/llvm/test/CodeGen/RISCV/rvv/vrol.ll
index af57eace297c9..c2d78b207a4e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrol.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrol.ll
@@ -1464,7 +1464,7 @@ define <vscale x 1 x i8> @intrinsic_vrol_vi_nxv1i8(<vscale x 1 x i8> %0, iXLen %
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 6
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8(
@@ -1480,7 +1480,7 @@ define <vscale x 1 x i8> @intrinsic_vrol_mask_vi_nxv1i8(<vscale x 1 x i8> %0, <v
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v9, 6, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8(
@@ -1497,7 +1497,7 @@ define <vscale x 2 x i8> @intrinsic_vrol_vi_nxv2i8(<vscale x 2 x i8> %0, iXLen %
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 6
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8(
@@ -1513,7 +1513,7 @@ define <vscale x 2 x i8> @intrinsic_vrol_mask_vi_nxv2i8(<vscale x 2 x i8> %0, <v
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v9, 6, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8(
@@ -1530,7 +1530,7 @@ define <vscale x 4 x i8> @intrinsic_vrol_vi_nxv4i8(<vscale x 4 x i8> %0, iXLen %
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 6
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8(
@@ -1546,7 +1546,7 @@ define <vscale x 4 x i8> @intrinsic_vrol_mask_vi_nxv4i8(<vscale x 4 x i8> %0, <v
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v9, 6, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8(
@@ -1563,7 +1563,7 @@ define <vscale x 8 x i8> @intrinsic_vrol_vi_nxv8i8(<vscale x 8 x i8> %0, iXLen %
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 6
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8(
@@ -1579,7 +1579,7 @@ define <vscale x 8 x i8> @intrinsic_vrol_mask_vi_nxv8i8(<vscale x 8 x i8> %0, <v
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v9, 6, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8(
@@ -1596,7 +1596,7 @@ define <vscale x 16 x i8> @intrinsic_vrol_vi_nxv16i8(<vscale x 16 x i8> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 6
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8(
@@ -1612,7 +1612,7 @@ define <vscale x 16 x i8> @intrinsic_vrol_mask_vi_nxv16i8(<vscale x 16 x i8> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT:    vror.vi v8, v10, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v10, 6, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8(
@@ -1629,7 +1629,7 @@ define <vscale x 32 x i8> @intrinsic_vrol_vi_nxv32i8(<vscale x 32 x i8> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 6
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8(
@@ -1645,7 +1645,7 @@ define <vscale x 32 x i8> @intrinsic_vrol_mask_vi_nxv32i8(<vscale x 32 x i8> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT:    vror.vi v8, v12, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v12, 6, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8(
@@ -1662,7 +1662,7 @@ define <vscale x 64 x i8> @intrinsic_vrol_vi_nxv64i8(<vscale x 64 x i8> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 6
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8(
@@ -1678,7 +1678,7 @@ define <vscale x 64 x i8> @intrinsic_vrol_mask_vi_nxv64i8(<vscale x 64 x i8> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv64i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
-; CHECK-NEXT:    vror.vi v8, v16, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v16, 6, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8(
@@ -1695,7 +1695,7 @@ define <vscale x 1 x i16> @intrinsic_vrol_vi_nxv1i16(<vscale x 1 x i16> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 14
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16(
@@ -1711,7 +1711,7 @@ define <vscale x 1 x i16> @intrinsic_vrol_mask_vi_nxv1i16(<vscale x 1 x i16> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v9, 14, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16(
@@ -1728,7 +1728,7 @@ define <vscale x 2 x i16> @intrinsic_vrol_vi_nxv2i16(<vscale x 2 x i16> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 14
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16(
@@ -1744,7 +1744,7 @@ define <vscale x 2 x i16> @intrinsic_vrol_mask_vi_nxv2i16(<vscale x 2 x i16> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v9, 14, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16(
@@ -1761,7 +1761,7 @@ define <vscale x 4 x i16> @intrinsic_vrol_vi_nxv4i16(<vscale x 4 x i16> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 14
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16(
@@ -1777,7 +1777,7 @@ define <vscale x 4 x i16> @intrinsic_vrol_mask_vi_nxv4i16(<vscale x 4 x i16> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v9, 14, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16(
@@ -1794,7 +1794,7 @@ define <vscale x 8 x i16> @intrinsic_vrol_vi_nxv8i16(<vscale x 8 x i16> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 14
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16(
@@ -1810,7 +1810,7 @@ define <vscale x 8 x i16> @intrinsic_vrol_mask_vi_nxv8i16(<vscale x 8 x i16> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT:    vror.vi v8, v10, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v10, 14, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16(
@@ -1827,7 +1827,7 @@ define <vscale x 16 x i16> @intrinsic_vrol_vi_nxv16i16(<vscale x 16 x i16> %0, i
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 14
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16(
@@ -1843,7 +1843,7 @@ define <vscale x 16 x i16> @intrinsic_vrol_mask_vi_nxv16i16(<vscale x 16 x i16>
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT:    vror.vi v8, v12, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v12, 14, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16(
@@ -1860,7 +1860,7 @@ define <vscale x 32 x i16> @intrinsic_vrol_vi_nxv32i16(<vscale x 32 x i16> %0, i
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 14
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16(
@@ -1876,7 +1876,7 @@ define <vscale x 32 x i16> @intrinsic_vrol_mask_vi_nxv32i16(<vscale x 32 x i16>
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
-; CHECK-NEXT:    vror.vi v8, v16, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v16, 14, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16(
@@ -1893,7 +1893,7 @@ define <vscale x 1 x i32> @intrinsic_vrol_vi_nxv1i32(<vscale x 1 x i32> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 30
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32(
@@ -1909,7 +1909,7 @@ define <vscale x 1 x i32> @intrinsic_vrol_mask_vi_nxv1i32(<vscale x 1 x i32> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v9, 30, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32(
@@ -1926,7 +1926,7 @@ define <vscale x 2 x i32> @intrinsic_vrol_vi_nxv2i32(<vscale x 2 x i32> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 30
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32(
@@ -1942,7 +1942,7 @@ define <vscale x 2 x i32> @intrinsic_vrol_mask_vi_nxv2i32(<vscale x 2 x i32> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT:    vror.vi v8, v9, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v9, 30, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32(
@@ -1959,7 +1959,7 @@ define <vscale x 4 x i32> @intrinsic_vrol_vi_nxv4i32(<vscale x 4 x i32> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 30
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32(
@@ -1975,7 +1975,7 @@ define <vscale x 4 x i32> @intrinsic_vrol_mask_vi_nxv4i32(<vscale x 4 x i32> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT:    vror.vi v8, v10, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v10, 30, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32(
@@ -1992,7 +1992,7 @@ define <vscale x 8 x i32> @intrinsic_vrol_vi_nxv8i32(<vscale x 8 x i32> %0, iXLe
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 30
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32(
@@ -2008,7 +2008,7 @@ define <vscale x 8 x i32> @intrinsic_vrol_mask_vi_nxv8i32(<vscale x 8 x i32> %0,
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT:    vror.vi v8, v12, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v12, 30, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32(
@@ -2025,7 +2025,7 @@ define <vscale x 16 x i32> @intrinsic_vrol_vi_nxv16i32(<vscale x 16 x i32> %0, i
 ; CHECK-LABEL: intrinsic_vrol_vi_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT:    vror.vi v8, v8, 62
+; CHECK-NEXT:    vror.vi v8, v8, 30
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32(
@@ -2041,7 +2041,7 @@ define <vscale x 16 x i32> @intrinsic_vrol_mask_vi_nxv16i32(<vscale x 16 x i32>
 ; CHECK-LABEL: intrinsic_vrol_mask_vi_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
-; CHECK-NEXT:    vror.vi v8, v16, 62, v0.t
+; CHECK-NEXT:    vror.vi v8, v16, 30, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32(


