[llvm] [RISCV] AddEdge between first mask producer and user of V0 (PR #146855)

Liao Chunyu via llvm-commits <llvm-commits at lists.llvm.org>
Thu Jul 3 20:57:30 PDT 2025


https://github.com/ChunyuLiao updated https://github.com/llvm/llvm-project/pull/146855

From d8bcf5a577c744a0e00e400e0a8d9090823d1628 Mon Sep 17 00:00:00 2001
From: Liao Chunyu <chunyu at iscas.ac.cn>
Date: Wed, 2 Jul 2025 23:53:31 -0400
Subject: [PATCH] [RISCV] AddEdge between mask producer and user of V0

If there are multiple mask producers followed by multiple masked
consumers, a move (vmv* v0, vx) may be generated to save the mask. By
moving the mask's producer after the mask's use, the spill can be
eliminated and the move removed.
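
As an illustration, here is the effect on the test_nxv256i1 case from
combine-reduce-add-to-vcpop.ll in the diff below, with the vsetvli
instructions elided for brevity (register numbers are taken from that
test):

  Before (second mask producer scheduled early, mask parked in v5):
    vslidedown.vx v0, v6, a1
    vslidedown.vx v5, v7, a1
    vadd.vi       v24, v24, 1, v0.t
    vmv1r.v       v0, v5
    vadd.vi       v8, v8, 1, v0.t

  After (producer rescheduled past the first masked use, no copy needed):
    vslidedown.vx v0, v6, a1
    vadd.vi       v24, v24, 1, v0.t
    vslidedown.vx v0, v7, a1
    vadd.vi       v8, v8, 1, v0.t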

---
 .../RISCV/RISCVVectorMaskDAGMutation.cpp      |  20 ++-
 .../RISCV/rvv/combine-reduce-add-to-vcpop.ll  |   5 +-
 .../rvv/fixed-vectors-shuffle-deinterleave.ll |  26 ++-
 .../RISCV/rvv/vscale-vw-web-simplification.ll | 156 ++++++++----------
 4 files changed, 106 insertions(+), 101 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
index be54a8c95a978..457cf832fa31b 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
@@ -10,6 +10,10 @@
 // instructions and masked instructions, so that we can reduce the live range
 // overlaps of mask registers.
 //
+// If there are multiple mask producers followed by multiple masked
+// instructions, then at each masked instruction we add dependency edges
+// between every mask producer and that masked instruction.
+//
 // The reason why we need to do this:
 // 1. When tracking register pressure, we don't track physical registers.
 // 2. We have a RegisterClass for mask register (which is `VMV0`), but we don't
@@ -68,11 +72,25 @@ class RISCVVectorMaskDAGMutation : public ScheduleDAGMutation {
 
   void apply(ScheduleDAGInstrs *DAG) override {
     SUnit *NearestUseV0SU = nullptr;
+    SmallVector<SUnit *, 2> DefMask;
     for (SUnit &SU : DAG->SUnits) {
       const MachineInstr *MI = SU.getInstr();
-      if (MI->findRegisterUseOperand(RISCV::V0, TRI))
+      if (RISCVII::getLMul(MI->getDesc().TSFlags) != RISCVVType::LMUL_8 &&
+          isSoleUseCopyToV0(SU))
+        DefMask.push_back(&SU);
+
+      if (MI->findRegisterUseOperand(RISCV::V0, TRI)) {
         NearestUseV0SU = &SU;
 
+        // Copy may not be a real use, so skip it here.
+        if (DefMask.size() > 1 && !MI->isCopy())
+          for (SUnit *Def : DefMask)
+            DAG->addEdge(Def, SDep(&SU, SDep::Artificial));
+
+        if (DefMask.size() > 0)
+          DefMask.erase(DefMask.begin());
+      }
+
       if (NearestUseV0SU && NearestUseV0SU != &SU && isSoleUseCopyToV0(SU) &&
           // For LMUL=8 cases, there will be more possibilities to spill.
           // FIXME: We should use RegPressureTracker to do fine-grained
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
index 5dc532273b770..2bc6b332a1a79 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-reduce-add-to-vcpop.ll
@@ -428,10 +428,11 @@ define i32 @test_nxv256i1(<vscale x 256 x i1> %x) {
 ; CHECK-NEXT:    vmerge.vim v24, v24, 1, v0
 ; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v0, v6, a1
-; CHECK-NEXT:    vslidedown.vx v5, v7, a1
 ; CHECK-NEXT:    vsetvli a2, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vadd.vi v24, v24, 1, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v5
+; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v0, v7, a1
+; CHECK-NEXT:    vsetvli a2, zero, e32, m8, ta, mu
 ; CHECK-NEXT:    vadd.vi v8, v8, 1, v0.t
 ; CHECK-NEXT:    vadd.vv v8, v8, v24
 ; CHECK-NEXT:    addi a2, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll
index c11319ff335fd..67584ba8a82cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-deinterleave.ll
@@ -143,16 +143,15 @@ define void @deinterleave6_0_i8(ptr %in, ptr %out) {
 ; CHECK-LABEL: deinterleave6_0_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmv.v.i v0, 2
-; CHECK-NEXT:    vmv.v.i v8, 4
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v9, 8
+; CHECK-NEXT:    vslidedown.vi v9, v8, 8
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v9, v9, 5, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vrgather.vi v9, v10, 4, v0.t
-; CHECK-NEXT:    vse8.v v9, (a1)
+; CHECK-NEXT:    vslidedown.vi v8, v8, 5, v0.t
+; CHECK-NEXT:    vmv.v.i v0, 4
+; CHECK-NEXT:    vrgather.vi v8, v9, 4, v0.t
+; CHECK-NEXT:    vse8.v v8, (a1)
 ; CHECK-NEXT:    ret
 entry:
   %0 = load <16 x i8>, ptr %in, align 1
@@ -188,16 +187,15 @@ define void @deinterleave7_0_i8(ptr %in, ptr %out) {
 ; CHECK-LABEL: deinterleave7_0_i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT:    vle8.v v9, (a0)
+; CHECK-NEXT:    vle8.v v8, (a0)
 ; CHECK-NEXT:    vmv.v.i v0, 2
-; CHECK-NEXT:    vmv.v.i v8, 4
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v9, 8
+; CHECK-NEXT:    vslidedown.vi v9, v8, 8
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT:    vslidedown.vi v9, v9, 6, v0.t
-; CHECK-NEXT:    vmv1r.v v0, v8
-; CHECK-NEXT:    vrgather.vi v9, v10, 6, v0.t
-; CHECK-NEXT:    vse8.v v9, (a1)
+; CHECK-NEXT:    vslidedown.vi v8, v8, 6, v0.t
+; CHECK-NEXT:    vmv.v.i v0, 4
+; CHECK-NEXT:    vrgather.vi v8, v9, 6, v0.t
+; CHECK-NEXT:    vse8.v v8, (a1)
 ; CHECK-NEXT:    ret
 entry:
   %0 = load <16 x i8>, ptr %in, align 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
index 206838917d004..ad2ed47e67e64 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vscale-vw-web-simplification.ll
@@ -153,20 +153,19 @@ define <vscale x 2 x i32> @vwop_vscale_sext_i1i32_multiple_users(ptr %x, ptr %y,
 ; NO_FOLDING:       # %bb.0:
 ; NO_FOLDING-NEXT:    vsetvli a3, zero, e32, m1, ta, mu
 ; NO_FOLDING-NEXT:    vlm.v v8, (a0)
-; NO_FOLDING-NEXT:    vlm.v v9, (a1)
-; NO_FOLDING-NEXT:    vlm.v v10, (a2)
-; NO_FOLDING-NEXT:    vmv.v.i v11, 0
+; NO_FOLDING-NEXT:    vmv.v.i v10, 0
 ; NO_FOLDING-NEXT:    vmv.v.v v0, v8
-; NO_FOLDING-NEXT:    vmerge.vim v12, v11, -1, v0
+; NO_FOLDING-NEXT:    vmerge.vim v11, v10, -1, v0
+; NO_FOLDING-NEXT:    vlm.v v0, (a1)
+; NO_FOLDING-NEXT:    vlm.v v9, (a2)
+; NO_FOLDING-NEXT:    vmerge.vim v12, v10, -1, v0
 ; NO_FOLDING-NEXT:    vmv.v.v v0, v9
-; NO_FOLDING-NEXT:    vmerge.vim v9, v11, -1, v0
-; NO_FOLDING-NEXT:    vmv.v.v v0, v10
-; NO_FOLDING-NEXT:    vmerge.vim v10, v11, -1, v0
-; NO_FOLDING-NEXT:    vmul.vv v9, v12, v9
-; NO_FOLDING-NEXT:    vsub.vv v11, v12, v10
+; NO_FOLDING-NEXT:    vmerge.vim v9, v10, -1, v0
+; NO_FOLDING-NEXT:    vmul.vv v10, v11, v12
+; NO_FOLDING-NEXT:    vsub.vv v11, v11, v9
 ; NO_FOLDING-NEXT:    vmv.v.v v0, v8
-; NO_FOLDING-NEXT:    vadd.vi v10, v10, -1, v0.t
-; NO_FOLDING-NEXT:    vor.vv v8, v9, v10
+; NO_FOLDING-NEXT:    vadd.vi v9, v9, -1, v0.t
+; NO_FOLDING-NEXT:    vor.vv v8, v10, v9
 ; NO_FOLDING-NEXT:    vor.vv v8, v8, v11
 ; NO_FOLDING-NEXT:    ret
 ;
@@ -174,20 +173,19 @@ define <vscale x 2 x i32> @vwop_vscale_sext_i1i32_multiple_users(ptr %x, ptr %y,
 ; FOLDING:       # %bb.0:
 ; FOLDING-NEXT:    vsetvli a3, zero, e32, m1, ta, mu
 ; FOLDING-NEXT:    vlm.v v8, (a0)
-; FOLDING-NEXT:    vlm.v v9, (a1)
-; FOLDING-NEXT:    vlm.v v10, (a2)
-; FOLDING-NEXT:    vmv.v.i v11, 0
+; FOLDING-NEXT:    vmv.v.i v10, 0
 ; FOLDING-NEXT:    vmv.v.v v0, v8
-; FOLDING-NEXT:    vmerge.vim v12, v11, -1, v0
+; FOLDING-NEXT:    vmerge.vim v11, v10, -1, v0
+; FOLDING-NEXT:    vlm.v v0, (a1)
+; FOLDING-NEXT:    vlm.v v9, (a2)
+; FOLDING-NEXT:    vmerge.vim v12, v10, -1, v0
 ; FOLDING-NEXT:    vmv.v.v v0, v9
-; FOLDING-NEXT:    vmerge.vim v9, v11, -1, v0
-; FOLDING-NEXT:    vmv.v.v v0, v10
-; FOLDING-NEXT:    vmerge.vim v10, v11, -1, v0
-; FOLDING-NEXT:    vmul.vv v9, v12, v9
-; FOLDING-NEXT:    vsub.vv v11, v12, v10
+; FOLDING-NEXT:    vmerge.vim v9, v10, -1, v0
+; FOLDING-NEXT:    vmul.vv v10, v11, v12
+; FOLDING-NEXT:    vsub.vv v11, v11, v9
 ; FOLDING-NEXT:    vmv.v.v v0, v8
-; FOLDING-NEXT:    vadd.vi v10, v10, -1, v0.t
-; FOLDING-NEXT:    vor.vv v8, v9, v10
+; FOLDING-NEXT:    vadd.vi v9, v9, -1, v0.t
+; FOLDING-NEXT:    vor.vv v8, v10, v9
 ; FOLDING-NEXT:    vor.vv v8, v8, v11
 ; FOLDING-NEXT:    ret
   %a = load <vscale x 2 x i1>, ptr %x
@@ -209,20 +207,19 @@ define <vscale x 2 x i8> @vwop_vscale_sext_i1i8_multiple_users(ptr %x, ptr %y, p
 ; NO_FOLDING:       # %bb.0:
 ; NO_FOLDING-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
 ; NO_FOLDING-NEXT:    vlm.v v8, (a0)
-; NO_FOLDING-NEXT:    vlm.v v9, (a1)
-; NO_FOLDING-NEXT:    vlm.v v10, (a2)
-; NO_FOLDING-NEXT:    vmv.v.i v11, 0
+; NO_FOLDING-NEXT:    vmv.v.i v10, 0
 ; NO_FOLDING-NEXT:    vmv1r.v v0, v8
-; NO_FOLDING-NEXT:    vmerge.vim v12, v11, -1, v0
+; NO_FOLDING-NEXT:    vmerge.vim v11, v10, -1, v0
+; NO_FOLDING-NEXT:    vlm.v v0, (a1)
+; NO_FOLDING-NEXT:    vlm.v v9, (a2)
+; NO_FOLDING-NEXT:    vmerge.vim v12, v10, -1, v0
 ; NO_FOLDING-NEXT:    vmv1r.v v0, v9
-; NO_FOLDING-NEXT:    vmerge.vim v9, v11, -1, v0
-; NO_FOLDING-NEXT:    vmv1r.v v0, v10
-; NO_FOLDING-NEXT:    vmerge.vim v10, v11, -1, v0
-; NO_FOLDING-NEXT:    vmul.vv v9, v12, v9
-; NO_FOLDING-NEXT:    vsub.vv v11, v12, v10
+; NO_FOLDING-NEXT:    vmerge.vim v9, v10, -1, v0
+; NO_FOLDING-NEXT:    vmul.vv v10, v11, v12
+; NO_FOLDING-NEXT:    vsub.vv v11, v11, v9
 ; NO_FOLDING-NEXT:    vmv1r.v v0, v8
-; NO_FOLDING-NEXT:    vadd.vi v10, v10, -1, v0.t
-; NO_FOLDING-NEXT:    vor.vv v8, v9, v10
+; NO_FOLDING-NEXT:    vadd.vi v9, v9, -1, v0.t
+; NO_FOLDING-NEXT:    vor.vv v8, v10, v9
 ; NO_FOLDING-NEXT:    vor.vv v8, v8, v11
 ; NO_FOLDING-NEXT:    ret
 ;
@@ -230,20 +227,19 @@ define <vscale x 2 x i8> @vwop_vscale_sext_i1i8_multiple_users(ptr %x, ptr %y, p
 ; FOLDING:       # %bb.0:
 ; FOLDING-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
 ; FOLDING-NEXT:    vlm.v v8, (a0)
-; FOLDING-NEXT:    vlm.v v9, (a1)
-; FOLDING-NEXT:    vlm.v v10, (a2)
-; FOLDING-NEXT:    vmv.v.i v11, 0
+; FOLDING-NEXT:    vmv.v.i v10, 0
 ; FOLDING-NEXT:    vmv1r.v v0, v8
-; FOLDING-NEXT:    vmerge.vim v12, v11, -1, v0
+; FOLDING-NEXT:    vmerge.vim v11, v10, -1, v0
+; FOLDING-NEXT:    vlm.v v0, (a1)
+; FOLDING-NEXT:    vlm.v v9, (a2)
+; FOLDING-NEXT:    vmerge.vim v12, v10, -1, v0
 ; FOLDING-NEXT:    vmv1r.v v0, v9
-; FOLDING-NEXT:    vmerge.vim v9, v11, -1, v0
-; FOLDING-NEXT:    vmv1r.v v0, v10
-; FOLDING-NEXT:    vmerge.vim v10, v11, -1, v0
-; FOLDING-NEXT:    vmul.vv v9, v12, v9
-; FOLDING-NEXT:    vsub.vv v11, v12, v10
+; FOLDING-NEXT:    vmerge.vim v9, v10, -1, v0
+; FOLDING-NEXT:    vmul.vv v10, v11, v12
+; FOLDING-NEXT:    vsub.vv v11, v11, v9
 ; FOLDING-NEXT:    vmv1r.v v0, v8
-; FOLDING-NEXT:    vadd.vi v10, v10, -1, v0.t
-; FOLDING-NEXT:    vor.vv v8, v9, v10
+; FOLDING-NEXT:    vadd.vi v9, v9, -1, v0.t
+; FOLDING-NEXT:    vor.vv v8, v10, v9
 ; FOLDING-NEXT:    vor.vv v8, v8, v11
 ; FOLDING-NEXT:    ret
   %a = load <vscale x 2 x i1>, ptr %x
@@ -444,16 +440,14 @@ define <vscale x 2 x i32> @vwop_vscale_zext_i1i32_multiple_users(ptr %x, ptr %y,
 ; NO_FOLDING:       # %bb.0:
 ; NO_FOLDING-NEXT:    vsetvli a3, zero, e32, m1, ta, mu
 ; NO_FOLDING-NEXT:    vlm.v v0, (a0)
-; NO_FOLDING-NEXT:    vlm.v v8, (a2)
-; NO_FOLDING-NEXT:    vlm.v v9, (a1)
-; NO_FOLDING-NEXT:    vmv.v.i v10, 0
-; NO_FOLDING-NEXT:    vmerge.vim v11, v10, 1, v0
-; NO_FOLDING-NEXT:    vmv.v.v v0, v8
-; NO_FOLDING-NEXT:    vmerge.vim v8, v10, 1, v0
-; NO_FOLDING-NEXT:    vadd.vv v10, v11, v8
-; NO_FOLDING-NEXT:    vsub.vv v8, v11, v8
-; NO_FOLDING-NEXT:    vmv.v.v v0, v9
-; NO_FOLDING-NEXT:    vor.vv v10, v10, v11, v0.t
+; NO_FOLDING-NEXT:    vmv.v.i v8, 0
+; NO_FOLDING-NEXT:    vmerge.vim v9, v8, 1, v0
+; NO_FOLDING-NEXT:    vlm.v v0, (a2)
+; NO_FOLDING-NEXT:    vmerge.vim v8, v8, 1, v0
+; NO_FOLDING-NEXT:    vlm.v v0, (a1)
+; NO_FOLDING-NEXT:    vadd.vv v10, v9, v8
+; NO_FOLDING-NEXT:    vsub.vv v8, v9, v8
+; NO_FOLDING-NEXT:    vor.vv v10, v10, v9, v0.t
 ; NO_FOLDING-NEXT:    vor.vv v8, v10, v8
 ; NO_FOLDING-NEXT:    ret
 ;
@@ -461,16 +455,14 @@ define <vscale x 2 x i32> @vwop_vscale_zext_i1i32_multiple_users(ptr %x, ptr %y,
 ; FOLDING:       # %bb.0:
 ; FOLDING-NEXT:    vsetvli a3, zero, e32, m1, ta, mu
 ; FOLDING-NEXT:    vlm.v v0, (a0)
-; FOLDING-NEXT:    vlm.v v8, (a2)
-; FOLDING-NEXT:    vlm.v v9, (a1)
-; FOLDING-NEXT:    vmv.v.i v10, 0
-; FOLDING-NEXT:    vmerge.vim v11, v10, 1, v0
-; FOLDING-NEXT:    vmv.v.v v0, v8
-; FOLDING-NEXT:    vmerge.vim v8, v10, 1, v0
-; FOLDING-NEXT:    vadd.vv v10, v11, v8
-; FOLDING-NEXT:    vsub.vv v8, v11, v8
-; FOLDING-NEXT:    vmv.v.v v0, v9
-; FOLDING-NEXT:    vor.vv v10, v10, v11, v0.t
+; FOLDING-NEXT:    vmv.v.i v8, 0
+; FOLDING-NEXT:    vmerge.vim v9, v8, 1, v0
+; FOLDING-NEXT:    vlm.v v0, (a2)
+; FOLDING-NEXT:    vmerge.vim v8, v8, 1, v0
+; FOLDING-NEXT:    vlm.v v0, (a1)
+; FOLDING-NEXT:    vadd.vv v10, v9, v8
+; FOLDING-NEXT:    vsub.vv v8, v9, v8
+; FOLDING-NEXT:    vor.vv v10, v10, v9, v0.t
 ; FOLDING-NEXT:    vor.vv v8, v10, v8
 ; FOLDING-NEXT:    ret
   %a = load <vscale x 2 x i1>, ptr %x
@@ -492,16 +484,14 @@ define <vscale x 2 x i8> @vwop_vscale_zext_i1i8_multiple_users(ptr %x, ptr %y, p
 ; NO_FOLDING:       # %bb.0:
 ; NO_FOLDING-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
 ; NO_FOLDING-NEXT:    vlm.v v0, (a0)
-; NO_FOLDING-NEXT:    vlm.v v8, (a2)
-; NO_FOLDING-NEXT:    vlm.v v9, (a1)
-; NO_FOLDING-NEXT:    vmv.v.i v10, 0
-; NO_FOLDING-NEXT:    vmerge.vim v11, v10, 1, v0
-; NO_FOLDING-NEXT:    vmv1r.v v0, v8
-; NO_FOLDING-NEXT:    vmerge.vim v8, v10, 1, v0
-; NO_FOLDING-NEXT:    vadd.vv v10, v11, v8
-; NO_FOLDING-NEXT:    vsub.vv v8, v11, v8
-; NO_FOLDING-NEXT:    vmv1r.v v0, v9
-; NO_FOLDING-NEXT:    vor.vv v10, v10, v11, v0.t
+; NO_FOLDING-NEXT:    vmv.v.i v8, 0
+; NO_FOLDING-NEXT:    vmerge.vim v9, v8, 1, v0
+; NO_FOLDING-NEXT:    vlm.v v0, (a2)
+; NO_FOLDING-NEXT:    vmerge.vim v8, v8, 1, v0
+; NO_FOLDING-NEXT:    vlm.v v0, (a1)
+; NO_FOLDING-NEXT:    vadd.vv v10, v9, v8
+; NO_FOLDING-NEXT:    vsub.vv v8, v9, v8
+; NO_FOLDING-NEXT:    vor.vv v10, v10, v9, v0.t
 ; NO_FOLDING-NEXT:    vor.vv v8, v10, v8
 ; NO_FOLDING-NEXT:    ret
 ;
@@ -509,16 +499,14 @@ define <vscale x 2 x i8> @vwop_vscale_zext_i1i8_multiple_users(ptr %x, ptr %y, p
 ; FOLDING:       # %bb.0:
 ; FOLDING-NEXT:    vsetvli a3, zero, e8, mf4, ta, mu
 ; FOLDING-NEXT:    vlm.v v0, (a0)
-; FOLDING-NEXT:    vlm.v v8, (a2)
-; FOLDING-NEXT:    vlm.v v9, (a1)
-; FOLDING-NEXT:    vmv.v.i v10, 0
-; FOLDING-NEXT:    vmerge.vim v11, v10, 1, v0
-; FOLDING-NEXT:    vmv1r.v v0, v8
-; FOLDING-NEXT:    vmerge.vim v8, v10, 1, v0
-; FOLDING-NEXT:    vadd.vv v10, v11, v8
-; FOLDING-NEXT:    vsub.vv v8, v11, v8
-; FOLDING-NEXT:    vmv1r.v v0, v9
-; FOLDING-NEXT:    vor.vv v10, v10, v11, v0.t
+; FOLDING-NEXT:    vmv.v.i v8, 0
+; FOLDING-NEXT:    vmerge.vim v9, v8, 1, v0
+; FOLDING-NEXT:    vlm.v v0, (a2)
+; FOLDING-NEXT:    vmerge.vim v8, v8, 1, v0
+; FOLDING-NEXT:    vlm.v v0, (a1)
+; FOLDING-NEXT:    vadd.vv v10, v9, v8
+; FOLDING-NEXT:    vsub.vv v8, v9, v8
+; FOLDING-NEXT:    vor.vv v10, v10, v9, v0.t
 ; FOLDING-NEXT:    vor.vv v8, v10, v8
 ; FOLDING-NEXT:    ret
   %a = load <vscale x 2 x i1>, ptr %x


