[llvm] b577d2d - [RISCV] Add a pass to remove duplicate VSETVLI instructions in a basic block.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 11 10:37:38 PST 2020


Author: Craig Topper
Date: 2020-12-11T10:35:37-08:00
New Revision: b577d2df7bd650668a3538429ecb3d08e037fe55

URL: https://github.com/llvm/llvm-project/commit/b577d2df7bd650668a3538429ecb3d08e037fe55
DIFF: https://github.com/llvm/llvm-project/commit/b577d2df7bd650668a3538429ecb3d08e037fe55.diff

LOG: [RISCV] Add a pass to remove duplicate VSETVLI instructions in a basic block.

Add a simple pass for removing redundant vsetvli instructions within a basic block. This handles the case where the AVL register and VTYPE immediate are the same and no other instruction that changes VL or VTYPE appears between them.
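
To illustrate, drawn from the load-add-store tests updated below: the repeated
vsetvlis use the same AVL register (zero) and the same VTYPE immediate, and
nothing in between writes VL or VTYPE, so every vsetvli after the first is
deleted.

    vsetvli a3, zero, e64,m1,ta,mu
    vle64.v v25, (a1)
    vsetvli a1, zero, e64,m1,ta,mu   # removed: same AVL (zero), same VTYPE
    vle64.v v26, (a2)
    vsetvli a1, zero, e64,m1,ta,mu   # removed
    vadd.vv v25, v25, v26
    vsetvli a1, zero, e64,m1,ta,mu   # removed
    vse64.v v25, (a0)

One pair is deliberately left alone (see the new cleanup-vsetvli.mir test):
when the AVL register is X0, a vsetvli whose VL output is also X0 keeps the
current VL, while one whose VL output is a real register sets VL to VLMAX.
Without knowing the incoming VL we can't prove those agree, so the second
vsetvli of such a pair is kept.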

There are going to be more opportunities for improvement in this space as we develop more complex tests.

Differential Revision: https://reviews.llvm.org/D92679

Added: 
    llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
    llvm/test/CodeGen/RISCV/rvv/cleanup-vsetvli.mir

Modified: 
    llvm/lib/Target/RISCV/CMakeLists.txt
    llvm/lib/Target/RISCV/RISCV.h
    llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
    llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
    llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
    llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/CMakeLists.txt b/llvm/lib/Target/RISCV/CMakeLists.txt
index 8996eed153eb..471cf59ba7ff 100644
--- a/llvm/lib/Target/RISCV/CMakeLists.txt
+++ b/llvm/lib/Target/RISCV/CMakeLists.txt
@@ -21,6 +21,7 @@ add_public_tablegen_target(RISCVCommonTableGen)
 add_llvm_target(RISCVCodeGen
   RISCVAsmPrinter.cpp
   RISCVCallLowering.cpp
+  RISCVCleanupVSETVLI.cpp
   RISCVExpandAtomicPseudoInsts.cpp
   RISCVExpandPseudoInsts.cpp
   RISCVFrameLowering.cpp

diff --git a/llvm/lib/Target/RISCV/RISCV.h b/llvm/lib/Target/RISCV/RISCV.h
index 9baa2cc2741a..23a79c7468c4 100644
--- a/llvm/lib/Target/RISCV/RISCV.h
+++ b/llvm/lib/Target/RISCV/RISCV.h
@@ -46,6 +46,9 @@ void initializeRISCVExpandPseudoPass(PassRegistry &);
 FunctionPass *createRISCVExpandAtomicPseudoPass();
 void initializeRISCVExpandAtomicPseudoPass(PassRegistry &);
 
+FunctionPass *createRISCVCleanupVSETVLIPass();
+void initializeRISCVCleanupVSETVLIPass(PassRegistry &);
+
 InstructionSelector *createRISCVInstructionSelector(const RISCVTargetMachine &,
                                                     RISCVSubtarget &,
                                                     RISCVRegisterBankInfo &);

diff --git a/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
new file mode 100644
index 000000000000..6a12f99b8903
--- /dev/null
+++ b/llvm/lib/Target/RISCV/RISCVCleanupVSETVLI.cpp
@@ -0,0 +1,131 @@
+//===- RISCVCleanupVSETVLI.cpp - Cleanup unneeded VSETVLI instructions ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a function pass that removes duplicate vsetvli
+// instructions within a basic block.
+//
+//===----------------------------------------------------------------------===//
+
+#include "RISCV.h"
+#include "RISCVSubtarget.h"
+#include "llvm/CodeGen/MachineFunctionPass.h"
+using namespace llvm;
+
+#define DEBUG_TYPE "riscv-cleanup-vsetvli"
+#define RISCV_CLEANUP_VSETVLI_NAME "RISCV Cleanup VSETVLI pass"
+
+namespace {
+
+class RISCVCleanupVSETVLI : public MachineFunctionPass {
+public:
+  static char ID;
+
+  RISCVCleanupVSETVLI() : MachineFunctionPass(ID) {
+    initializeRISCVCleanupVSETVLIPass(*PassRegistry::getPassRegistry());
+  }
+  bool runOnMachineFunction(MachineFunction &MF) override;
+  bool runOnMachineBasicBlock(MachineBasicBlock &MBB);
+
+  MachineFunctionProperties getRequiredProperties() const override {
+    return MachineFunctionProperties().set(
+        MachineFunctionProperties::Property::IsSSA);
+  }
+
+  // This pass modifies the program, but does not modify the CFG
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesCFG();
+    MachineFunctionPass::getAnalysisUsage(AU);
+  }
+
+  StringRef getPassName() const override { return RISCV_CLEANUP_VSETVLI_NAME; }
+};
+
+} // end anonymous namespace
+
+char RISCVCleanupVSETVLI::ID = 0;
+
+INITIALIZE_PASS(RISCVCleanupVSETVLI, DEBUG_TYPE,
+                RISCV_CLEANUP_VSETVLI_NAME, false, false)
+
+bool RISCVCleanupVSETVLI::runOnMachineBasicBlock(MachineBasicBlock &MBB) {
+  bool Changed = false;
+  MachineInstr *PrevVSETVLI = nullptr;
+
+  for (auto MII = MBB.begin(), MIE = MBB.end(); MII != MIE;) {
+    MachineInstr &MI = *MII++;
+
+    if (MI.getOpcode() != RISCV::PseudoVSETVLI) {
+      if (PrevVSETVLI &&
+          (MI.isCall() || MI.modifiesRegister(RISCV::VL) ||
+           MI.modifiesRegister(RISCV::VTYPE))) {
+        // Old VL/VTYPE is overwritten.
+        PrevVSETVLI = nullptr;
+      }
+      continue;
+    }
+
+    // If we don't have a previous VSETVLI or the VL output isn't dead, we
+    // can't remove this VSETVLI.
+    if (!PrevVSETVLI || !MI.getOperand(0).isDead()) {
+      PrevVSETVLI = &MI;
+      continue;
+    }
+
+    Register PrevAVLReg = PrevVSETVLI->getOperand(1).getReg();
+    Register AVLReg = MI.getOperand(1).getReg();
+    int64_t PrevVTYPEImm = PrevVSETVLI->getOperand(2).getImm();
+    int64_t VTYPEImm = MI.getOperand(2).getImm();
+
+    // Does this VSETVLI use the same AVL register and VTYPE immediate?
+    if (PrevAVLReg != AVLReg || PrevVTYPEImm != VTYPEImm) {
+      PrevVSETVLI = &MI;
+      continue;
+    }
+
+    // If the AVLReg is X0 we need to look at the output VL of both VSETVLIs.
+    if (AVLReg == RISCV::X0) {
+      Register PrevOutVL = PrevVSETVLI->getOperand(0).getReg();
+      Register OutVL = MI.getOperand(0).getReg();
+      // We can't remove if the previous VSETVLI left VL unchanged and the
+      // current instruction is setting it to VLMAX. Without knowing the VL
+      // before the previous instruction we don't know if this is a change.
+      if (PrevOutVL == RISCV::X0 && OutVL != RISCV::X0) {
+        PrevVSETVLI = &MI;
+        continue;
+      }
+    }
+
+    // This VSETVLI is redundant, remove it.
+    MI.eraseFromParent();
+    Changed = true;
+  }
+
+  return Changed;
+}
+
+bool RISCVCleanupVSETVLI::runOnMachineFunction(MachineFunction &MF) {
+  if (skipFunction(MF.getFunction()))
+    return false;
+
+  // Skip if the vector extension is not enabled.
+  const RISCVSubtarget &ST = MF.getSubtarget<RISCVSubtarget>();
+  if (!ST.hasStdExtV())
+    return false;
+
+  bool Changed = false;
+
+  for (MachineBasicBlock &MBB : MF)
+    Changed |= runOnMachineBasicBlock(MBB);
+
+  return Changed;
+}
+
+/// Returns an instance of the Cleanup VSETVLI pass.
+FunctionPass *llvm::createRISCVCleanupVSETVLIPass() {
+  return new RISCVCleanupVSETVLI();
+}

diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 99515ab335a8..5851f56bf4cf 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -39,6 +39,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
   initializeGlobalISel(*PR);
   initializeRISCVMergeBaseOffsetOptPass(*PR);
   initializeRISCVExpandPseudoPass(*PR);
+  initializeRISCVCleanupVSETVLIPass(*PR);
 }
 
 static StringRef computeDataLayout(const Triple &TT) {
@@ -183,6 +184,8 @@ void RISCVPassConfig::addPreEmitPass2() {
 }
 
 void RISCVPassConfig::addPreRegAlloc() {
-  if (TM->getOptLevel() != CodeGenOpt::None)
+  if (TM->getOptLevel() != CodeGenOpt::None) {
     addPass(createRISCVMergeBaseOffsetOptPass());
+    addPass(createRISCVCleanupVSETVLIPass());
+  }
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
index 16e2aefc61ca..049de4e1bd23 100644
--- a/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/add-vsetvli-gpr.mir
@@ -49,12 +49,9 @@ body:             |
 # POST-INSERTER: dead %12:gpr = PseudoVSETVLI %0, 76, implicit-def $vl, implicit-def $vtype
 # POST-INSERTER: PseudoVSE64_V_M1 killed %8, %3, $noreg, $noreg, -1, implicit $vl, implicit $vtype :: (store unknown-size into %ir.pc, align 8)
 
-# CODEGEN: vsetvli	a4, a3, e64,m1,ta,mu
+# CODEGEN: vsetvli	a3, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v	v25, (a1)
-# CODEGEN-NEXT: vsetvli	a1, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vle64.v	v26, (a2)
-# CODEGEN-NEXT: vsetvli	a1, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vadd.vv	v25, v25, v26
-# CODEGEN-NEXT: vsetvli	a1, a3, e64,m1,ta,mu
 # CODEGEN-NEXT: vse64.v	v25, (a0)
 # CODEGEN-NEXT: ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/cleanup-vsetvli.mir b/llvm/test/CodeGen/RISCV/rvv/cleanup-vsetvli.mir
new file mode 100644
index 000000000000..6af6204848bc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/cleanup-vsetvli.mir
@@ -0,0 +1,40 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc %s -mtriple=riscv64 -run-pass=riscv-cleanup-vsetvli -o - | FileCheck %s
+
+# Make sure we don't combine these two VSETVLIs in the cleanup pass. The first
+# keeps the previous value of VL, the second sets it to VLMAX. We can't remove
+# the second since we can't tell whether it changes VL.
+
+--- |
+  ; ModuleID = '../llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll'
+  source_filename = "../llvm/test/CodeGen/RISCV/rvv/add-vsetvli-vlmax.ll"
+  target datalayout = "e-m:e-p:64:64-i64:64-i128:128-n64-S128"
+  target triple = "riscv64"
+
+  define void @cleanup_vsetvli() #0 {
+    ret void
+  }
+
+  attributes #0 = { "target-features"="+experimental-v" }
+
+...
+---
+name:            cleanup_vsetvli
+alignment:       4
+tracksRegLiveness: true
+registers:
+  - { id: 0, class: gpr }
+frameInfo:
+  maxAlignment:    1
+machineFunctionInfo: {}
+body:             |
+  bb.0 (%ir-block.0):
+    ; CHECK-LABEL: name: cleanup_vsetvli
+    ; CHECK: dead $x0 = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+    ; CHECK: dead %0:gpr = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+    ; CHECK: PseudoRET
+    dead $x0  = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+    dead %0:gpr  = PseudoVSETVLI $x0, 12, implicit-def $vl, implicit-def $vtype
+    PseudoRET
+
+...

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
index 52d70831c6a9..9e133c1d95fe 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-16.ll
@@ -9,11 +9,8 @@ define void @vadd_vint16m1(<vscale x 4 x i16> *%pc, <vscale x 4 x i16> *%pa, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,m1,ta,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i16>, <vscale x 4 x i16>* %pa
@@ -28,11 +25,8 @@ define void @vadd_vint16m2(<vscale x 8 x i16> *%pc, <vscale x 8 x i16> *%pa, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
 ; CHECK-NEXT:    vse16.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i16>, <vscale x 8 x i16>* %pa
@@ -47,11 +41,8 @@ define void @vadd_vint16m4(<vscale x 16 x i16> *%pc, <vscale x 16 x i16> *%pa, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e16,m4,ta,mu
 ; CHECK-NEXT:    vse16.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i16>, <vscale x 16 x i16>* %pa
@@ -66,11 +57,8 @@ define void @vadd_vint16m8(<vscale x 32 x i16> *%pc, <vscale x 32 x i16> *%pa, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vle16.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e16,m8,ta,mu
 ; CHECK-NEXT:    vse16.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 32 x i16>, <vscale x 32 x i16>* %pa
@@ -85,11 +73,8 @@ define void @vadd_vint16mf2(<vscale x 2 x i16> *%pc, <vscale x 2 x i16> *%pa, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf2,ta,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i16>, <vscale x 2 x i16>* %pa
@@ -104,11 +89,8 @@ define void @vadd_vint16mf4(<vscale x 1 x i16> *%pc, <vscale x 1 x i16> *%pa, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vle16.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e16,mf4,ta,mu
 ; CHECK-NEXT:    vse16.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i16>, <vscale x 1 x i16>* %pa

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
index e99432109916..4ad224f42823 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-32.ll
@@ -9,11 +9,8 @@ define void @vadd_vint32m1(<vscale x 2 x i32> *%pc, <vscale x 2 x i32> *%pa, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e32,m1,ta,mu
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i32>, <vscale x 2 x i32>* %pa
@@ -28,11 +25,8 @@ define void @vadd_vint32m2(<vscale x 4 x i32> *%pc, <vscale x 4 x i32> *%pa, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e32,m2,ta,mu
 ; CHECK-NEXT:    vse32.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i32>, <vscale x 4 x i32>* %pa
@@ -47,11 +41,8 @@ define void @vadd_vint32m4(<vscale x 8 x i32> *%pc, <vscale x 8 x i32> *%pa, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
 ; CHECK-NEXT:    vse32.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i32>, <vscale x 8 x i32>* %pa
@@ -66,11 +57,8 @@ define void @vadd_vint32m8(<vscale x 16 x i32> *%pc, <vscale x 16 x i32> *%pa, <
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vle32.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e32,m8,ta,mu
 ; CHECK-NEXT:    vse32.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i32>, <vscale x 16 x i32>* %pa
@@ -85,11 +73,8 @@ define void @vadd_vint32mf2(<vscale x 1 x i32> *%pc, <vscale x 1 x i32> *%pa, <v
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vle32.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e32,mf2,ta,mu
 ; CHECK-NEXT:    vse32.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i32>, <vscale x 1 x i32>* %pa

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
index 74ac7adef14a..09f74f7092bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-64.ll
@@ -9,11 +9,8 @@ define void @vadd_vint64m1(<vscale x 1 x i64> *%pc, <vscale x 1 x i64> *%pa, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e64,m1,ta,mu
 ; CHECK-NEXT:    vse64.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i64>, <vscale x 1 x i64>* %pa
@@ -28,11 +25,8 @@ define void @vadd_vint64m2(<vscale x 2 x i64> *%pc, <vscale x 2 x i64> *%pa, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e64,m2,ta,mu
 ; CHECK-NEXT:    vse64.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i64>, <vscale x 2 x i64>* %pa
@@ -47,11 +41,8 @@ define void @vadd_vint64m4(<vscale x 4 x i64> *%pc, <vscale x 4 x i64> *%pa, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e64,m4,ta,mu
 ; CHECK-NEXT:    vse64.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i64>, <vscale x 4 x i64>* %pa
@@ -66,11 +57,8 @@ define void @vadd_vint64m8(<vscale x 8 x i64> *%pc, <vscale x 8 x i64> *%pa, <vs
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vle64.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
 ; CHECK-NEXT:    vse64.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i64>, <vscale x 8 x i64>* %pa

diff --git a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
index 7e575f8b7649..ae6ea9970325 100644
--- a/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/load-add-store-8.ll
@@ -9,11 +9,8 @@ define void @vadd_vint8m1(<vscale x 8 x i8> *%pc, <vscale x 8 x i8> *%pa, <vscal
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 8 x i8>, <vscale x 8 x i8>* %pa
@@ -28,11 +25,8 @@ define void @vadd_vint8m2(<vscale x 16 x i8> *%pc, <vscale x 16 x i8> *%pa, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vadd.vv v26, v26, v28
-; CHECK-NEXT:    vsetvli a1, zero, e8,m2,ta,mu
 ; CHECK-NEXT:    vse8.v v26, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 16 x i8>, <vscale x 16 x i8>* %pa
@@ -47,11 +41,8 @@ define void @vadd_vint8m4(<vscale x 32 x i8> *%pc, <vscale x 32 x i8> *%pa, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v28, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vadd.vv v28, v28, v8
-; CHECK-NEXT:    vsetvli a1, zero, e8,m4,ta,mu
 ; CHECK-NEXT:    vse8.v v28, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 32 x i8>, <vscale x 32 x i8>* %pa
@@ -66,11 +57,8 @@ define void @vadd_vint8m8(<vscale x 64 x i8> *%pc, <vscale x 64 x i8> *%pa, <vsc
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v8, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vle8.v v16, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vadd.vv v8, v8, v16
-; CHECK-NEXT:    vsetvli a1, zero, e8,m8,ta,mu
 ; CHECK-NEXT:    vse8.v v8, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 64 x i8>, <vscale x 64 x i8>* %pa
@@ -85,11 +73,8 @@ define void @vadd_vint8mf2(<vscale x 4 x i8> *%pc, <vscale x 4 x i8> *%pa, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf2,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 4 x i8>, <vscale x 4 x i8>* %pa
@@ -104,11 +89,8 @@ define void @vadd_vint8mf4(<vscale x 2 x i8> *%pc, <vscale x 2 x i8> *%pa, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf4,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 2 x i8>, <vscale x 2 x i8>* %pa
@@ -123,11 +105,8 @@ define void @vadd_vint8mf8(<vscale x 1 x i8> *%pc, <vscale x 1 x i8> *%pa, <vsca
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetvli a3, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v25, (a1)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vle8.v v26, (a2)
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vadd.vv v25, v25, v26
-; CHECK-NEXT:    vsetvli a1, zero, e8,mf8,ta,mu
 ; CHECK-NEXT:    vse8.v v25, (a0)
 ; CHECK-NEXT:    ret
   %va = load <vscale x 1 x i8>, <vscale x 1 x i8>* %pa


        

