[llvm] f67c2cd - [RISCV] Handle Zvabd and XRivosVizip EEWs in RISCVVLOptimizer (#184117)

via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 2 23:05:41 PST 2026


Author: Luke Lau
Date: 2026-03-03T07:05:36Z
New Revision: f67c2cd75e256af054919bee68886ebd51155b39

URL: https://github.com/llvm/llvm-project/commit/f67c2cd75e256af054919bee68886ebd51155b39
DIFF: https://github.com/llvm/llvm-project/commit/f67c2cd75e256af054919bee68886ebd51155b39.diff

LOG: [RISCV] Handle Zvabd and XRivosVizip EEWs in RISCVVLOptimizer (#184117)

This allows the VL optimizer to handle more cases that
RISCVVectorPeephole currently catches.

The XRivosVizip instructions have ReadsPastVL=true, so only the vl of
the zip instruction itself is reduced, not its inputs.

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave2.ll
    llvm/test/CodeGen/RISCV/rvv/vabd.ll
    llvm/test/CodeGen/RISCV/rvv/vabdu.ll
    llvm/test/CodeGen/RISCV/rvv/vl-opt.mir

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 99eaf2683e1cc..fce023cadd99c 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -586,6 +586,19 @@ static std::optional<unsigned> getOperandLog2EEW(const MachineOperand &MO) {
   // Vector Carry-less Multiply Return High Half
   case RISCV::VCLMULH_VV:
   case RISCV::VCLMULH_VX:
+
+  // Zvabd
+  case RISCV::VABS_V:
+  case RISCV::VABD_VV:
+  case RISCV::VABDU_VV:
+
+  // XRivosVizip
+  case RISCV::RI_VZIPEVEN_VV:
+  case RISCV::RI_VZIPODD_VV:
+  case RISCV::RI_VZIP2A_VV:
+  case RISCV::RI_VZIP2B_VV:
+  case RISCV::RI_VUNZIP2A_VV:
+  case RISCV::RI_VUNZIP2B_VV:
     return MILog2SEW;
 
   // Vector Widening Shift Left Logical (Zvbb)
@@ -651,6 +664,9 @@ static std::optional<unsigned> getOperandLog2EEW(const MachineOperand &MO) {
   case RISCV::VFWCVT_F_X_V:
   case RISCV::VFWCVT_F_F_V:
   case RISCV::VFWCVTBF16_F_F_V:
+  // Zvabd
+  case RISCV::VWABDA_VV:
+  case RISCV::VWABDAU_VV:
     return IsMODef ? MILog2SEW + 1 : MILog2SEW;
 
   // Def and Op1 uses EEW=2*SEW. Op2 uses EEW=SEW.

diff  --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave2.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave2.ll
index 06c3bc656f16a..3a14f87c3f18a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave2.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave2.ll
@@ -415,8 +415,8 @@ define void @vnsrl_0_double(ptr %in, ptr %out) {
 ; ZIP:       # %bb.0: # %entry
 ; ZIP-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
 ; ZIP-NEXT:    vle64.v v8, (a0)
-; ZIP-NEXT:    ri.vunzip2a.vv v10, v8, v9
 ; ZIP-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZIP-NEXT:    ri.vunzip2a.vv v10, v8, v9
 ; ZIP-NEXT:    vse64.v v10, (a1)
 ; ZIP-NEXT:    ret
 entry:
@@ -450,8 +450,8 @@ define void @vnsrl_64_double(ptr %in, ptr %out) {
 ; ZIP:       # %bb.0: # %entry
 ; ZIP-NEXT:    vsetivli zero, 4, e64, m1, ta, ma
 ; ZIP-NEXT:    vle64.v v8, (a0)
-; ZIP-NEXT:    ri.vunzip2b.vv v10, v8, v9
 ; ZIP-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; ZIP-NEXT:    ri.vunzip2b.vv v10, v8, v9
 ; ZIP-NEXT:    vse64.v v10, (a1)
 ; ZIP-NEXT:    ret
 entry:

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vabd.ll b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
index f3750e7f4513d..189131ee14360 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vabd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vabd.ll
@@ -167,8 +167,8 @@ define <vscale x 64 x i8> @vabd_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i8
 define <vscale x 64 x i8> @vabd_vv_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, iXLen %vl) {
 ; CHECK-LABEL: vabd_vv_mask_i8m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vle8.v v24, (a0)
 ; CHECK-NEXT:    vabd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.riscv.vabd.mask(
@@ -318,8 +318,8 @@ define <vscale x 32 x i16> @vabd_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32 x
 define <vscale x 32 x i16> @vabd_vv_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask,iXLen %vl) {
 ; CHECK-LABEL: vabd_vv_mask_i16m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vle16.v v24, (a0)
 ; CHECK-NEXT:    vabd.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.riscv.vabd.mask(

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
index e629e41b61172..0592b28b370b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vabdu.ll
@@ -167,8 +167,8 @@ define <vscale x 64 x i8> @vabdu_vv_i8m8(<vscale x 64 x i8> %a, <vscale x 64 x i
 define <vscale x 64 x i8> @vabdu_vv_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %a, <vscale x 64 x i8> %b, <vscale x 64 x i1> %mask, iXLen %vl) {
 ; CHECK-LABEL: vabdu_vv_mask_i8m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vl8r.v v24, (a0)
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vle8.v v24, (a0)
 ; CHECK-NEXT:    vabdu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
   %res = call <vscale x 64 x i8> @llvm.riscv.vabdu.mask(
@@ -318,8 +318,8 @@ define <vscale x 32 x i16> @vabdu_vv_i16m8(<vscale x 32 x i16> %a, <vscale x 32
 define <vscale x 32 x i16> @vabdu_vv_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %a, <vscale x 32 x i16> %b, <vscale x 32 x i1> %mask, iXLen %vl) {
 ; CHECK-LABEL: vabdu_vv_mask_i16m8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vl8re16.v v24, (a0)
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vle16.v v24, (a0)
 ; CHECK-NEXT:    vabdu.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
   %res = call <vscale x 32 x i16> @llvm.riscv.vabdu.mask(

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
index 111266d2e988c..cc2cdfb2f1a87 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt.mir
@@ -833,10 +833,173 @@ body: |
     ; CHECK-LABEL: name: vabs_v
     ; CHECK: liveins: $x8
     ; CHECK-NEXT: {{  $}}
-    ; CHECK-NEXT: %x:vr = PseudoVABS_V_M1 $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %x:vr = PseudoVABS_V_M1 $noreg, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
     ; CHECK-NEXT: $v8 = COPY %y
     %x:vr = PseudoVABS_V_M1 $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
     %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
     $v8 = COPY %y
 ...
+---
+name: vabd_v
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: vabd_v
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:vr = PseudoVABD_VV_M1 $noreg, $noreg, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %y
+    %x:vr = PseudoVABD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
+    %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %y
+...
+---
+name: vabdu_v
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: vabdu_v
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:vr = PseudoVABDU_VV_M1 $noreg, $noreg, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %y
+    %x:vr = PseudoVABDU_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
+    %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %y
+...
+---
+name: vwabda_vv
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: vwabda_vv
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: early-clobber %x:vr = PseudoVWABDA_VV_MF2 $noreg, $noreg, $noreg, 1 /* vl */, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %y
+    %x:vr = PseudoVWABDA_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+    %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %y
+...
+---
+name: vwabdau_vv
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: vwabdau_vv
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: early-clobber %x:vr = PseudoVWABDAU_VV_MF2 $noreg, $noreg, $noreg, 1 /* vl */, 4 /* e16 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %y
+    %x:vr = PseudoVWABDAU_VV_MF2 $noreg, $noreg, $noreg, -1, 4 /* e16 */, 0 /* tu, mu */
+    %y:vr = PseudoVADD_VV_M1 $noreg, %x, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %y
+...
+---
+# XRivosVizip: these shouldn't reduce the vl of their inputs.
+name: ri_vzipeven_vv
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: ri_vzipeven_vv
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: early-clobber %y:vr = PseudoRI_VZIPEVEN_VV_M1 $noreg, %x, %x, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %z
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
+    %y:vr = PseudoRI_VZIPEVEN_VV_M1 $noreg, %x, %x, -1, 5 /* e32 */, 0 /* tu, mu */
+    %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %z
+...
+---
+name: ri_vzipodd_vv
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: ri_vzipodd_vv
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: early-clobber %y:vr = PseudoRI_VZIPODD_VV_M1 $noreg, %x, %x, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %z
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
+    %y:vr = PseudoRI_VZIPODD_VV_M1 $noreg, %x, %x, -1, 5 /* e32 */, 0 /* tu, mu */
+    %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %z
+...
+---
+name: ri_vzip2a_vv
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: ri_vzip2a_vv
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: early-clobber %y:vr = PseudoRI_VZIP2A_VV_M1 $noreg, %x, %x, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %z
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
+    %y:vr = PseudoRI_VZIP2A_VV_M1 $noreg, %x, %x, -1, 5 /* e32 */, 0 /* tu, mu */
+    %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %z
+...
+---
+name: ri_vzip2b_vv
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: ri_vzip2b_vv
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: early-clobber %y:vr = PseudoRI_VZIP2B_VV_M1 $noreg, %x, %x, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %z
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
+    %y:vr = PseudoRI_VZIP2B_VV_M1 $noreg, %x, %x, -1, 5 /* e32 */, 0 /* tu, mu */
+    %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %z
+...
+---
+name: ri_vunzip2a_vv
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: ri_vunzip2a_vv
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: early-clobber %y:vr = PseudoRI_VUNZIP2A_VV_M1 $noreg, %x, %x, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %z
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
+    %y:vr = PseudoRI_VUNZIP2A_VV_M1 $noreg, %x, %x, -1, 5 /* e32 */, 0 /* tu, mu */
+    %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %z
+...
+---
+name: ri_vunzip2b_vv
+body: |
+  bb.0:
+    liveins: $x8
+    ; CHECK-LABEL: name: ri_vunzip2b_vv
+    ; CHECK: liveins: $x8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1 /* vl=VLMAX */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: early-clobber %y:vr = PseudoRI_VUNZIP2B_VV_M1 $noreg, %x, %x, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1 /* vl */, 5 /* e32 */, 0 /* tu, mu */
+    ; CHECK-NEXT: $v8 = COPY %z
+    %x:vr = PseudoVADD_VV_M1 $noreg, $noreg, $noreg, -1, 5 /* e32 */, 0 /* tu, mu */
+    %y:vr = PseudoRI_VUNZIP2B_VV_M1 $noreg, %x, %x, -1, 5 /* e32 */, 0 /* tu, mu */
+    %z:vr = PseudoVADD_VV_M1 $noreg, %y, $noreg, 1, 5 /* e32 */, 0 /* tu, mu */
+    $v8 = COPY %z
+...


        


More information about the llvm-commits mailing list