[Mlir-commits] [mlir] 95d2d1c - Move stepvector intrinsic out of experimental namespace (#98043)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Wed Aug 28 04:48:26 PDT 2024


Author: Maciej Gabka
Date: 2024-08-28T12:48:20+01:00
New Revision: 95d2d1cba0e1428718bbdce0504292f62b212920

URL: https://github.com/llvm/llvm-project/commit/95d2d1cba0e1428718bbdce0504292f62b212920
DIFF: https://github.com/llvm/llvm-project/commit/95d2d1cba0e1428718bbdce0504292f62b212920.diff

LOG: Move stepvector intrinsic out of experimental namespace (#98043)

This patch moves the stepvector intrinsic out of the experimental
namespace.

The intrinsic has been available in LLVM for several years and is widely used.
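
For illustration, a minimal sketch of the rename as it appears in IR (the
nxv4i32 overload is just an example; any integer vector type with elements
of at least 8 bits is accepted, and old calls are auto-upgraded when
bitcode or textual IR is read):

  ; before
  %step = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()

  ; after
  %step = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()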

Added: 
    llvm/test/Bitcode/upgrade-stepvector-intrinsic.ll

Modified: 
    llvm/docs/LangRef.rst
    llvm/docs/ReleaseNotes.rst
    llvm/include/llvm/CodeGen/BasicTTIImpl.h
    llvm/include/llvm/IR/Intrinsics.td
    llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/lib/IR/AutoUpgrade.cpp
    llvm/lib/IR/IRBuilder.cpp
    llvm/lib/IR/Verifier.cpp
    llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
    llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
    llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
    llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
    llvm/test/Analysis/CostModel/AArch64/neon-stepvector.ll
    llvm/test/Analysis/CostModel/AArch64/sve-stepvector.ll
    llvm/test/Analysis/CostModel/RISCV/stepvector.ll
    llvm/test/CodeGen/AArch64/neon-stepvector.ll
    llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
    llvm/test/CodeGen/AArch64/sve-stepvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
    llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
    llvm/test/CodeGen/RISCV/rvv/pr95865.ll
    llvm/test/CodeGen/RISCV/rvv/stepvector.ll
    llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
    llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
    llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll
    llvm/test/Transforms/InstCombine/vscale_extractelement.ll
    llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
    llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
    llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll
    llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
    llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
    llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
    llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
    llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
    llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
    llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
    llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
    llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
    llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
    llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
    llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll
    llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll
    llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll
    llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll
    llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll
    llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
    llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
    llvm/test/Verifier/stepvector-intrinsic.ll
    llvm/unittests/IR/IRBuilderTest.cpp
    mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
    mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
    mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
    mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
    mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir
    mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-deinterleave.mlir
    mlir/test/Target/LLVMIR/llvmir-invalid.mlir

Removed: 
    


################################################################################
diff  --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 3581d1421febfd..8c696cb16e77f8 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -19581,27 +19581,27 @@ vector <N x eltty>, imm is a signed integer constant in the range
 -N <= imm < N. For a scalable vector <vscale x N x eltty>, imm is a signed
 integer constant in the range -X <= imm < X where X=vscale_range_min * N.
 
-'``llvm.experimental.stepvector``' Intrinsic
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+'``llvm.stepvector``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-This is an overloaded intrinsic. You can use ``llvm.experimental.stepvector``
+This is an overloaded intrinsic. You can use ``llvm.stepvector``
 to generate a vector whose lane values comprise the linear sequence
 <0, 1, 2, ...>. It is primarily intended for scalable vectors.
 
 ::
 
-      declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-      declare <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
+      declare <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+      declare <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
 
-The '``llvm.experimental.stepvector``' intrinsics are used to create vectors
+The '``llvm.stepvector``' intrinsics are used to create vectors
 of integers whose elements contain a linear sequence of values starting from 0
-with a step of 1.  This experimental intrinsic can only be used for vectors
-with integer elements that are at least 8 bits in size. If the sequence value
-exceeds the allowed limit for the element type then the result for that lane is
-undefined.
+with a step of 1. This intrinsic can only be used for vectors with integer
+elements that are at least 8 bits in size. If the sequence value exceeds
+the allowed limit for the element type then the result for that lane is
+a poison value.
 
 These intrinsics work for both fixed and scalable vectors. While this intrinsic
-is marked as experimental, the recommended way to express this operation for
+supports all vector types, the recommended way to express this operation for
 fixed-width vectors is still to generate a constant vector instead.
 
 

diff  --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index c2773c92498de9..9982b5f427e4b2 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -52,6 +52,7 @@ Changes to the LLVM IR
 
 * The ``x86_mmx`` IR type has been removed. It will be translated to
   the standard vector type ``<1 x i64>`` in bitcode upgrade.
+* Renamed ``llvm.experimental.stepvector`` intrinsic to ``llvm.stepvector``.
 
 Changes to LLVM infrastructure
 ------------------------------

diff  --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 217e3f1324f9c9..47323ca067a435 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -1641,7 +1641,7 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
       return thisT()->getStridedMemoryOpCost(Instruction::Load, RetTy, Ptr,
                                              VarMask, Alignment, CostKind, I);
     }
-    case Intrinsic::experimental_stepvector: {
+    case Intrinsic::stepvector: {
       if (isa<ScalableVectorType>(RetTy))
         return BaseT::getIntrinsicInstrCost(ICA, CostKind);
       // The cost of materialising a constant integer vector.
@@ -1789,8 +1789,8 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
       Type *NewVecTy = VectorType::get(
           NewEltTy, cast<VectorType>(Args[0]->getType())->getElementCount());
 
-      IntrinsicCostAttributes StepVecAttrs(Intrinsic::experimental_stepvector,
-                                           NewVecTy, {}, FMF);
+      IntrinsicCostAttributes StepVecAttrs(Intrinsic::stepvector, NewVecTy, {},
+                                           FMF);
       InstructionCost Cost =
           thisT()->getIntrinsicInstrCost(StepVecAttrs, CostKind);
 

diff  --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index 0841273fd2e1e5..e3bf0446575ae5 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1846,8 +1846,8 @@ def int_threadlocal_address : DefaultAttrsIntrinsic<[llvm_anyptr_ty], [LLVMMatch
                                                     [NonNull<RetIndex>, NonNull<ArgIndex<0>>,
                                                      IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
 
-def int_experimental_stepvector : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                                                        [], [IntrNoMem]>;
+def int_stepvector : DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                                            [], [IntrNoMem]>;
 
 //===---------------- Vector Predication Intrinsics --------------===//
 // Memory Intrinsics

diff  --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 60dcb118542785..8151dcfce9e4b0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7781,7 +7781,7 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
   case Intrinsic::experimental_deoptimize:
     LowerDeoptimizeCall(&I);
     return;
-  case Intrinsic::experimental_stepvector:
+  case Intrinsic::stepvector:
     visitStepVector(I);
     return;
   case Intrinsic::vector_reduce_fadd:

diff  --git a/llvm/lib/IR/AutoUpgrade.cpp b/llvm/lib/IR/AutoUpgrade.cpp
index 8dd5b9b3ec3d1f..50fc2e728fcc01 100644
--- a/llvm/lib/IR/AutoUpgrade.cpp
+++ b/llvm/lib/IR/AutoUpgrade.cpp
@@ -1168,6 +1168,13 @@ static bool upgradeIntrinsicFunction1(Function *F, Function *&NewFn,
       }
       break; // No other 'experimental.vector.*'.
     }
+    if (Name.consume_front("experimental.stepvector.")) {
+      Intrinsic::ID ID = Intrinsic::stepvector;
+      rename(F);
+      NewFn = Intrinsic::getDeclaration(F->getParent(), ID,
+                                        F->getFunctionType()->getReturnType());
+      return true;
+    }
     break; // No other 'e*'.
   case 'f':
     if (Name.starts_with("flt.rounds")) {

diff  --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index e5cde875ab1d80..486ee99b355da5 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -117,8 +117,8 @@ Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
     if (STy->getScalarSizeInBits() < 8)
       StepVecType =
           VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
-    Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
-                                 {StepVecType}, {}, nullptr, Name);
+    Value *Res = CreateIntrinsic(Intrinsic::stepvector, {StepVecType}, {},
+                                 nullptr, Name);
     if (StepVecType != DstType)
       Res = CreateTrunc(Res, DstType);
     return Res;

diff  --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 5ff1f3dfb0dc95..2c0f10a34f919d 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -6097,11 +6097,11 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
           &Call);
     break;
   }
-  case Intrinsic::experimental_stepvector: {
+  case Intrinsic::stepvector: {
     VectorType *VecTy = dyn_cast<VectorType>(Call.getType());
     Check(VecTy && VecTy->getScalarType()->isIntegerTy() &&
               VecTy->getScalarSizeInBits() >= 8,
-          "experimental_stepvector only supported for vectors of integers "
+          "stepvector only supported for vectors of integers "
           "with a bitwidth of at least 8.",
           &Call);
     break;

diff  --git a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
index dc748290f2e21e..dbe4abdf5d5a5c 100644
--- a/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64TargetTransformInfo.cpp
@@ -605,7 +605,7 @@ AArch64TTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
       return LT.first;
     break;
   }
-  case Intrinsic::experimental_stepvector: {
+  case Intrinsic::stepvector: {
     InstructionCost Cost = 1; // Cost of the `index' instruction
     auto LT = getTypeLegalizationCost(RetTy);
     // Legalisation of illegal vectors involves an `index' instruction plus

diff  --git a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
index 06e7110280b9a1..74297c50036fce 100644
--- a/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVGatherScatterLowering.cpp
@@ -127,7 +127,7 @@ static std::pair<Value *, Value *> matchStridedStart(Value *Start,
     return matchStridedConstant(StartC);
 
   // Base case, start is a stepvector
-  if (match(Start, m_Intrinsic<Intrinsic::experimental_stepvector>())) {
+  if (match(Start, m_Intrinsic<Intrinsic::stepvector>())) {
     auto *Ty = Start->getType()->getScalarType();
     return std::make_pair(ConstantInt::get(Ty, 0), ConstantInt::get(Ty, 1));
   }

diff  --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index 5ec07b2a0aa8fd..537c62bb0aacd1 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -929,7 +929,7 @@ RISCVTTIImpl::getIntrinsicInstrCost(const IntrinsicCostAttributes &ICA,
     break;
   }
   // TODO: add more intrinsic
-  case Intrinsic::experimental_stepvector: {
+  case Intrinsic::stepvector: {
     auto LT = getTypeLegalizationCost(RetTy);
     // Legalisation of illegal types involves an `index' instruction plus
     // (LT.first - 1) vector adds.

diff  --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 753ed55523c843..e018f80dc3b2c8 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -62,7 +62,7 @@ static bool cheapToScalarize(Value *V, Value *EI) {
   if (auto *C = dyn_cast<Constant>(V))
     return CEI || C->getSplatValue();
 
-  if (CEI && match(V, m_Intrinsic<Intrinsic::experimental_stepvector>())) {
+  if (CEI && match(V, m_Intrinsic<Intrinsic::stepvector>())) {
     ElementCount EC = cast<VectorType>(V->getType())->getElementCount();
     // Index needs to be lower than the minimum size of the vector, because
     // for scalable vector, the vector size is known at run time.
@@ -433,8 +433,7 @@ Instruction *InstCombinerImpl::visitExtractElementInst(ExtractElementInst &EI) {
       Intrinsic::ID IID = II->getIntrinsicID();
       // Index needs to be lower than the minimum size of the vector, because
       // for scalable vector, the vector size is known at run time.
-      if (IID == Intrinsic::experimental_stepvector &&
-          IndexC->getValue().ult(NumElts)) {
+      if (IID == Intrinsic::stepvector && IndexC->getValue().ult(NumElts)) {
         Type *Ty = EI.getType();
         unsigned BitWidth = Ty->getIntegerBitWidth();
         Value *Idx;

diff  --git a/llvm/test/Analysis/CostModel/AArch64/neon-stepvector.ll b/llvm/test/Analysis/CostModel/AArch64/neon-stepvector.ll
index cf208608c32009..f687ba8b0cf330 100644
--- a/llvm/test/Analysis/CostModel/AArch64/neon-stepvector.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/neon-stepvector.ll
@@ -6,36 +6,36 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 ; Check icmp for legal integer vectors.
 define void @stepvector_legal_int() {
 ; CHECK-LABEL: 'stepvector_legal_int'
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x i64> @llvm.experimental.stepvector.v2i64()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x i32> @llvm.experimental.stepvector.v4i32()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <8 x i16> @llvm.experimental.stepvector.v8i16()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <16 x i8> @llvm.experimental.stepvector.v16i8()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %1 = call <2 x i64> @llvm.stepvector.v2i64()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <4 x i32> @llvm.stepvector.v4i32()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <8 x i16> @llvm.stepvector.v8i16()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <16 x i8> @llvm.stepvector.v16i8()
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
 ;
-  %1 = call <2 x i64> @llvm.experimental.stepvector.v2i64()
-  %2 = call <4 x i32> @llvm.experimental.stepvector.v4i32()
-  %3 = call <8 x i16> @llvm.experimental.stepvector.v8i16()
-  %4 = call <16 x i8> @llvm.experimental.stepvector.v16i8()
+  %1 = call <2 x i64> @llvm.stepvector.v2i64()
+  %2 = call <4 x i32> @llvm.stepvector.v4i32()
+  %3 = call <8 x i16> @llvm.stepvector.v8i16()
+  %4 = call <16 x i8> @llvm.stepvector.v16i8()
   ret void
 }
 
 ; Check icmp for an illegal integer vector.
 define void @stepvector_illegal_int() {
 ; CHECK-LABEL: 'stepvector_illegal_int'
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %1 = call <4 x i64> @llvm.experimental.stepvector.v4i64()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %2 = call <16 x i32> @llvm.experimental.stepvector.v16i32()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %1 = call <4 x i64> @llvm.stepvector.v4i64()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %2 = call <16 x i32> @llvm.stepvector.v16i32()
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
 ;
-  %1 = call <4 x i64> @llvm.experimental.stepvector.v4i64()
-  %2 = call <16 x i32> @llvm.experimental.stepvector.v16i32()
+  %1 = call <4 x i64> @llvm.stepvector.v4i64()
+  %2 = call <16 x i32> @llvm.stepvector.v16i32()
   ret void
 }
 
 
-declare <2 x i64> @llvm.experimental.stepvector.v2i64()
-declare <4 x i32> @llvm.experimental.stepvector.v4i32()
-declare <8 x i16> @llvm.experimental.stepvector.v8i16()
-declare <16 x i8> @llvm.experimental.stepvector.v16i8()
+declare <2 x i64> @llvm.stepvector.v2i64()
+declare <4 x i32> @llvm.stepvector.v4i32()
+declare <8 x i16> @llvm.stepvector.v8i16()
+declare <16 x i8> @llvm.stepvector.v16i8()
 
-declare <4 x i64> @llvm.experimental.stepvector.v4i64()
-declare <16 x i32> @llvm.experimental.stepvector.v16i32()
+declare <4 x i64> @llvm.stepvector.v4i64()
+declare <16 x i32> @llvm.stepvector.v16i32()

diff  --git a/llvm/test/Analysis/CostModel/AArch64/sve-stepvector.ll b/llvm/test/Analysis/CostModel/AArch64/sve-stepvector.ll
index 677572432f9c32..994dfc7bb1a237 100644
--- a/llvm/test/Analysis/CostModel/AArch64/sve-stepvector.ll
+++ b/llvm/test/Analysis/CostModel/AArch64/sve-stepvector.ll
@@ -5,32 +5,32 @@ target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
 ; Check icmp for legal integer vectors.
 define void @stepvector_legal_int() {
 ; CHECK-LABEL: 'stepvector_legal_int'
-; CHECK: Cost Model: Found an estimated cost of 1 for instruction:   %1 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-; CHECK: Cost Model: Found an estimated cost of 1 for instruction:   %2 = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-; CHECK: Cost Model: Found an estimated cost of 1 for instruction:   %3 = call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
-; CHECK: Cost Model: Found an estimated cost of 1 for instruction:   %4 = call <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
-  %1 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-  %2 = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-  %3 = call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
-  %4 = call <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction:   %1 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction:   %2 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction:   %3 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
+; CHECK: Cost Model: Found an estimated cost of 1 for instruction:   %4 = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
+  %1 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+  %2 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+  %3 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
+  %4 = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
   ret void
 }
 
 ; Check icmp for an illegal integer vector.
 define void @stepvector_illegal_int() {
 ; CHECK-LABEL: 'stepvector_illegal_int'
-; CHECK: Cost Model: Found an estimated cost of 2 for instruction:   %1 = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-; CHECK: Cost Model: Found an estimated cost of 4 for instruction:   %2 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
-  %1 = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-  %2 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+; CHECK: Cost Model: Found an estimated cost of 2 for instruction:   %1 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK: Cost Model: Found an estimated cost of 4 for instruction:   %2 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+  %1 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+  %2 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
   ret void
 }
 
 
-declare <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-declare <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
-declare <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
+declare <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+declare <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+declare <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
+declare <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
 
-declare <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-declare <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+declare <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+declare <vscale x 16 x i32> @llvm.stepvector.nxv16i32()

diff  --git a/llvm/test/Analysis/CostModel/RISCV/stepvector.ll b/llvm/test/Analysis/CostModel/RISCV/stepvector.ll
index e59995572a1080..49ca90ea31787e 100644
--- a/llvm/test/Analysis/CostModel/RISCV/stepvector.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/stepvector.ll
@@ -4,87 +4,87 @@
 
 define void @stepvector() {
 ; CHECK-LABEL: 'stepvector'
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %1 = call <vscale x 1 x i8> @llvm.experimental.stepvector.nxv1i8()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <vscale x 2 x i8> @llvm.experimental.stepvector.nxv2i8()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <vscale x 4 x i8> @llvm.experimental.stepvector.nxv4i8()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %5 = call <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %6 = call <vscale x 32 x i8> @llvm.experimental.stepvector.nxv32i8()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %7 = call <vscale x 64 x i8> @llvm.experimental.stepvector.nxv64i8()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %8 = call <vscale x 128 x i8> @llvm.experimental.stepvector.nxv128i8()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 2 x i16> @llvm.experimental.stepvector.nxv2i16()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %11 = call <vscale x 4 x i16> @llvm.experimental.stepvector.nxv4i16()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %12 = call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %13 = call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %14 = call <vscale x 32 x i16> @llvm.experimental.stepvector.nxv32i16()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %15 = call <vscale x 64 x i16> @llvm.experimental.stepvector.nxv64i16()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i32> @llvm.experimental.stepvector.nxv1i32()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %19 = call <vscale x 8 x i32> @llvm.experimental.stepvector.nxv8i32()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %20 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %21 = call <vscale x 32 x i32> @llvm.experimental.stepvector.nxv32i32()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %22 = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %23 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %24 = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %25 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
-; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %26 = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %1 = call <vscale x 1 x i8> @llvm.stepvector.nxv1i8()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <vscale x 2 x i8> @llvm.stepvector.nxv2i8()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <vscale x 4 x i8> @llvm.stepvector.nxv4i8()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %5 = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %6 = call <vscale x 32 x i8> @llvm.stepvector.nxv32i8()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %7 = call <vscale x 64 x i8> @llvm.stepvector.nxv64i8()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %8 = call <vscale x 128 x i8> @llvm.stepvector.nxv128i8()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %9 = call <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %10 = call <vscale x 2 x i16> @llvm.stepvector.nxv2i16()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %11 = call <vscale x 4 x i16> @llvm.stepvector.nxv4i16()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %12 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %13 = call <vscale x 16 x i16> @llvm.stepvector.nxv16i16()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %14 = call <vscale x 32 x i16> @llvm.stepvector.nxv32i16()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %15 = call <vscale x 64 x i16> @llvm.stepvector.nxv64i16()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %17 = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %19 = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %20 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %21 = call <vscale x 32 x i32> @llvm.stepvector.nxv32i32()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %22 = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %23 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %24 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %25 = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %26 = call <vscale x 16 x i64> @llvm.stepvector.nxv16i64()
 ; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
 ;
-  call <vscale x 1 x i8> @llvm.experimental.stepvector.nxv1i8()
-  call <vscale x 2 x i8> @llvm.experimental.stepvector.nxv2i8()
-  call <vscale x 4 x i8> @llvm.experimental.stepvector.nxv4i8()
-  call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
-  call <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
-  call <vscale x 32 x i8> @llvm.experimental.stepvector.nxv32i8()
-  call <vscale x 64 x i8> @llvm.experimental.stepvector.nxv64i8()
-  call <vscale x 128 x i8> @llvm.experimental.stepvector.nxv128i8()
-  call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
-  call <vscale x 2 x i16> @llvm.experimental.stepvector.nxv2i16()
-  call <vscale x 4 x i16> @llvm.experimental.stepvector.nxv4i16()
-  call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
-  call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
-  call <vscale x 32 x i16> @llvm.experimental.stepvector.nxv32i16()
-  call <vscale x 64 x i16> @llvm.experimental.stepvector.nxv64i16()
-  call <vscale x 1 x i32> @llvm.experimental.stepvector.nxv1i32()
-  call <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
-  call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-  call <vscale x 8 x i32> @llvm.experimental.stepvector.nxv8i32()
-  call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
-  call <vscale x 32 x i32> @llvm.experimental.stepvector.nxv32i32()
-  call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
-  call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-  call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-  call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
-  call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
+  call <vscale x 1 x i8> @llvm.stepvector.nxv1i8()
+  call <vscale x 2 x i8> @llvm.stepvector.nxv2i8()
+  call <vscale x 4 x i8> @llvm.stepvector.nxv4i8()
+  call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+  call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
+  call <vscale x 32 x i8> @llvm.stepvector.nxv32i8()
+  call <vscale x 64 x i8> @llvm.stepvector.nxv64i8()
+  call <vscale x 128 x i8> @llvm.stepvector.nxv128i8()
+  call <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
+  call <vscale x 2 x i16> @llvm.stepvector.nxv2i16()
+  call <vscale x 4 x i16> @llvm.stepvector.nxv4i16()
+  call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
+  call <vscale x 16 x i16> @llvm.stepvector.nxv16i16()
+  call <vscale x 32 x i16> @llvm.stepvector.nxv32i16()
+  call <vscale x 64 x i16> @llvm.stepvector.nxv64i16()
+  call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+  call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+  call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+  call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+  call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+  call <vscale x 32 x i32> @llvm.stepvector.nxv32i32()
+  call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
+  call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+  call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+  call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+  call <vscale x 16 x i64> @llvm.stepvector.nxv16i64()
   ret void
 }
 
 
-declare <vscale x 1 x i8> @llvm.experimental.stepvector.nxv1i8()
-declare <vscale x 2 x i8> @llvm.experimental.stepvector.nxv2i8()
-declare <vscale x 4 x i8> @llvm.experimental.stepvector.nxv4i8()
-declare <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
-declare <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
-declare <vscale x 32 x i8> @llvm.experimental.stepvector.nxv32i8()
-declare <vscale x 64 x i8> @llvm.experimental.stepvector.nxv64i8()
-declare <vscale x 128 x i8> @llvm.experimental.stepvector.nxv128i8()
-declare <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
-declare <vscale x 2 x i16> @llvm.experimental.stepvector.nxv2i16()
-declare <vscale x 4 x i16> @llvm.experimental.stepvector.nxv4i16()
-declare <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
-declare <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
-declare <vscale x 32 x i16> @llvm.experimental.stepvector.nxv32i16()
-declare <vscale x 64 x i16> @llvm.experimental.stepvector.nxv64i16()
-declare <vscale x 1 x i32> @llvm.experimental.stepvector.nxv1i32()
-declare <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
-declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-declare <vscale x 8 x i32> @llvm.experimental.stepvector.nxv8i32()
-declare <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
-declare <vscale x 32 x i32> @llvm.experimental.stepvector.nxv32i32()
-declare <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
-declare <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-declare <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-declare <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
-declare <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
+declare <vscale x 1 x i8> @llvm.stepvector.nxv1i8()
+declare <vscale x 2 x i8> @llvm.stepvector.nxv2i8()
+declare <vscale x 4 x i8> @llvm.stepvector.nxv4i8()
+declare <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+declare <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
+declare <vscale x 32 x i8> @llvm.stepvector.nxv32i8()
+declare <vscale x 64 x i8> @llvm.stepvector.nxv64i8()
+declare <vscale x 128 x i8> @llvm.stepvector.nxv128i8()
+declare <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
+declare <vscale x 2 x i16> @llvm.stepvector.nxv2i16()
+declare <vscale x 4 x i16> @llvm.stepvector.nxv4i16()
+declare <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
+declare <vscale x 16 x i16> @llvm.stepvector.nxv16i16()
+declare <vscale x 32 x i16> @llvm.stepvector.nxv32i16()
+declare <vscale x 64 x i16> @llvm.stepvector.nxv64i16()
+declare <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
+declare <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+declare <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+declare <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
+declare <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+declare <vscale x 32 x i32> @llvm.stepvector.nxv32i32()
+declare <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
+declare <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+declare <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+declare <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+declare <vscale x 16 x i64> @llvm.stepvector.nxv16i64()

diff  --git a/llvm/test/Bitcode/upgrade-stepvector-intrinsic.ll b/llvm/test/Bitcode/upgrade-stepvector-intrinsic.ll
new file mode 100644
index 00000000000000..ee2c76efe6378a
--- /dev/null
+++ b/llvm/test/Bitcode/upgrade-stepvector-intrinsic.ll
@@ -0,0 +1,26 @@
+; RUN: opt -S < %s | FileCheck %s
+; RUN: llvm-as %s -o - | llvm-dis | FileCheck %s
+
+define <4 x i32> @stepvector_fixed() {
+; CHECK-LABEL: @stepvector_fixed
+; CHECK: %res = call <4 x i32> @llvm.stepvector.v4i32()
+
+  %res = call <4 x i32> @llvm.experimental.stepvector.v4i32()
+  ret <4 x i32> %res
+}
+
+define <vscale x 4 x i32> @stepvector_scalable() {
+; CHECK-LABEL: @stepvector_scalable
+; CHECK: %res = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+
+  %res = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+  ret <vscale x 4 x i32> %res
+}
+
+
+declare <4 x i32> @llvm.experimental.stepvector.v4i32()
+; CHECK: <4 x i32> @llvm.stepvector.v4i32()
+
+declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; CHECK: <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+

diff  --git a/llvm/test/CodeGen/AArch64/neon-stepvector.ll b/llvm/test/CodeGen/AArch64/neon-stepvector.ll
index 7255574f42097a..7a8a3c37a04a36 100644
--- a/llvm/test/CodeGen/AArch64/neon-stepvector.ll
+++ b/llvm/test/CodeGen/AArch64/neon-stepvector.ll
@@ -12,7 +12,7 @@ define <2 x i64> @stepvector_v2i64() {
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI0_0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <2 x i64> @llvm.experimental.stepvector.v2i64()
+  %0 = call <2 x i64> @llvm.stepvector.v2i64()
   ret <2 x i64> %0
 }
 
@@ -28,7 +28,7 @@ define <4 x i32> @stepvector_v4i32() {
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI1_0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <4 x i32> @llvm.experimental.stepvector.v4i32()
+  %0 = call <4 x i32> @llvm.stepvector.v4i32()
   ret <4 x i32> %0
 }
 
@@ -48,7 +48,7 @@ define <8 x i16> @stepvector_v8i16() {
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI2_0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <8 x i16> @llvm.experimental.stepvector.v8i16()
+  %0 = call <8 x i16> @llvm.stepvector.v8i16()
   ret <8 x i16> %0
 }
 
@@ -76,7 +76,7 @@ define <16 x i8> @stepvector_v16i8() {
 ; CHECK-NEXT:    ldr q0, [x8, :lo12:.LCPI3_0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <16 x i8> @llvm.experimental.stepvector.v16i8()
+  %0 = call <16 x i8> @llvm.stepvector.v16i8()
   ret <16 x i8> %0
 }
 
@@ -97,7 +97,7 @@ define <4 x i64> @stepvector_v4i64() {
 ; CHECK-NEXT:    ldr q1, [x9, :lo12:.LCPI4_1]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <4 x i64> @llvm.experimental.stepvector.v4i64()
+  %0 = call <4 x i64> @llvm.stepvector.v4i64()
   ret <4 x i64> %0
 }
 
@@ -134,7 +134,7 @@ define <16 x i32> @stepvector_v16i32() {
 ; CHECK-NEXT:    ldr q3, [x11, :lo12:.LCPI5_3]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <16 x i32> @llvm.experimental.stepvector.v16i32()
+  %0 = call <16 x i32> @llvm.stepvector.v16i32()
   ret <16 x i32> %0
 }
 
@@ -148,7 +148,7 @@ define <2 x i32> @stepvector_v2i32() {
 ; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI6_0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <2 x i32> @llvm.experimental.stepvector.v2i32()
+  %0 = call <2 x i32> @llvm.stepvector.v2i32()
   ret <2 x i32> %0
 }
 
@@ -164,17 +164,17 @@ define <4 x i16> @stepvector_v4i16() {
 ; CHECK-NEXT:    ldr d0, [x8, :lo12:.LCPI7_0]
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <4 x i16> @llvm.experimental.stepvector.v4i16()
+  %0 = call <4 x i16> @llvm.stepvector.v4i16()
   ret <4 x i16> %0
 }
 
 
-declare <2 x i64> @llvm.experimental.stepvector.v2i64()
-declare <4 x i32> @llvm.experimental.stepvector.v4i32()
-declare <8 x i16> @llvm.experimental.stepvector.v8i16()
-declare <16 x i8> @llvm.experimental.stepvector.v16i8()
+declare <2 x i64> @llvm.stepvector.v2i64()
+declare <4 x i32> @llvm.stepvector.v4i32()
+declare <8 x i16> @llvm.stepvector.v8i16()
+declare <16 x i8> @llvm.stepvector.v16i8()
 
-declare <4 x i64> @llvm.experimental.stepvector.v4i64()
-declare <16 x i32> @llvm.experimental.stepvector.v16i32()
-declare <2 x i32> @llvm.experimental.stepvector.v2i32()
-declare <4 x i16> @llvm.experimental.stepvector.v4i16()
+declare <4 x i64> @llvm.stepvector.v4i64()
+declare <16 x i32> @llvm.stepvector.v16i32()
+declare <2 x i32> @llvm.stepvector.v2i32()
+declare <4 x i16> @llvm.stepvector.v4i16()

diff  --git a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
index ad6371f78ec088..3030c38e13bf4f 100644
--- a/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
+++ b/llvm/test/CodeGen/AArch64/sve-gather-scatter-addr-opts.ll
@@ -15,7 +15,7 @@ define void @scatter_i8_index_offset_maximum(ptr %base, i64 %offset, <vscale x 4
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %t2 = insertelement <vscale x 4 x i64> undef, i64 33554431, i32 0
   %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
@@ -36,7 +36,7 @@ define void @scatter_i16_index_offset_minimum(ptr %base, i64 %offset, <vscale x
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %t2 = insertelement <vscale x 4 x i64> undef, i64 -33554432, i32 0
   %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i16, ptr %base, <vscale x 4 x i64> %t5
@@ -54,7 +54,7 @@ define <vscale x 4 x i8> @gather_i8_index_offset_8(ptr %base, i64 %offset, <vsca
 ; CHECK-NEXT:    ret
   %splat.insert0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %splat0 = shufflevector <vscale x 4 x i64> %splat.insert0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %splat.insert1 = insertelement <vscale x 4 x i64> undef, i64 1, i32 0
   %splat1 = shufflevector <vscale x 4 x i64> %splat.insert1, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %t1 = mul <vscale x 4 x i64> %splat1, %step
@@ -89,7 +89,7 @@ define void @scatter_f16_index_offset_var(ptr %base, i64 %offset, i64 %scale, <v
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %t2 = insertelement <vscale x 4 x i64> undef, i64 %scale, i32 0
   %t3 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr half, ptr %base, <vscale x 4 x i64> %t5
@@ -119,7 +119,7 @@ define void @scatter_i8_index_offset_maximum_plus_one(ptr %base, i64 %offset, <v
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %t2 = insertelement <vscale x 4 x i64> undef, i64 33554432, i32 0
   %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
@@ -150,7 +150,7 @@ define void @scatter_i8_index_offset_minimum_minus_one(ptr %base, i64 %offset, <
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %t2 = insertelement <vscale x 4 x i64> undef, i64 -33554433, i32 0
   %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
@@ -180,7 +180,7 @@ define void @scatter_i8_index_stride_too_big(ptr %base, i64 %offset, <vscale x 4
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %t2 = insertelement <vscale x 4 x i64> undef, i64 4611686018427387904, i32 0
   %t3 = shufflevector <vscale x 4 x i64> %t2, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t4 = mul <vscale x 4 x i64> %t3, %step
   %t5 = add <vscale x 4 x i64> %t1, %t4
   %t6 = getelementptr i8, ptr %base, <vscale x 4 x i64> %t5
@@ -200,7 +200,7 @@ define <vscale x 4 x i8> @gather_8i8_index_offset_8(ptr %base, i64 %offset, <vsc
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t2 = add <vscale x 4 x i64> %t1, %step
   %t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
   %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
@@ -221,7 +221,7 @@ define <vscale x 4 x float> @gather_f32_index_offset_8(ptr %base, i64 %offset, <
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t2 = add <vscale x 4 x i64> %t1, %step
   %t3 = getelementptr [8 x float], ptr %base, <vscale x 4 x i64> %t2
   %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
@@ -241,7 +241,7 @@ define void @scatter_i8_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1>
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t2 = add <vscale x 4 x i64> %t1, %step
   %t3 = getelementptr [8 x i8], ptr %base, <vscale x 4 x i64> %t2
   %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
@@ -262,7 +262,7 @@ define void @scatter_f16_index_offset_8(ptr %base, i64 %offset, <vscale x 4 x i1
 ; CHECK-NEXT:    ret
   %t0 = insertelement <vscale x 4 x i64> undef, i64 %offset, i32 0
   %t1 = shufflevector <vscale x 4 x i64> %t0, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %t2 = add <vscale x 4 x i64> %t1, %step
   %t3 = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %t2
   %t4 = bitcast <vscale x 4 x ptr> %t3 to <vscale x 4 x ptr>
@@ -284,7 +284,7 @@ define void @scatter_f16_index_add_add(ptr %base, i64 %offset, i64 %offset2, <vs
   %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %splat.offset2.ins = insertelement <vscale x 4 x i64> undef, i64 %offset2, i32 0
   %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %add1 = add <vscale x 4 x i64> %splat.offset, %step
   %add2 = add <vscale x 4 x i64> %add1, %splat.offset2
   %gep = getelementptr [8 x half], ptr %base, <vscale x 4 x i64> %add2
@@ -307,7 +307,7 @@ define void @scatter_f16_index_add_add_mul(ptr %base, i64 %offset, i64 %offset2,
   %splat.offset = shufflevector <vscale x 4 x i64> %splat.offset.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
   %splat.offset2.ins = insertelement <vscale x 4 x i64> undef, i64 %offset2, i32 0
   %splat.offset2 = shufflevector <vscale x 4 x i64> %splat.offset2.ins, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
-  %step = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %step = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %add1 = add <vscale x 4 x i64> %splat.offset, %step
   %add2 = add <vscale x 4 x i64> %add1, %splat.offset2
   %splat.const8.ins = insertelement <vscale x 4 x i64> undef, i64 8, i32 0
@@ -488,4 +488,4 @@ declare void @llvm.masked.scatter.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x ptr>
 declare void @llvm.masked.scatter.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
 declare void @llvm.masked.scatter.nxv4f16(<vscale x 4 x half>, <vscale x 4 x ptr>, i32, <vscale x 4 x i1>)
 
-declare <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+declare <vscale x 4 x i64> @llvm.stepvector.nxv4i64()

diff  --git a/llvm/test/CodeGen/AArch64/sve-stepvector.ll b/llvm/test/CodeGen/AArch64/sve-stepvector.ll
index 4c5f27d3e7093e..f79ec00a19e6f4 100644
--- a/llvm/test/CodeGen/AArch64/sve-stepvector.ll
+++ b/llvm/test/CodeGen/AArch64/sve-stepvector.ll
@@ -9,7 +9,7 @@ define <vscale x 2 x i64> @stepvector_nxv2i64() {
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %0 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   ret <vscale x 2 x i64> %0
 }
 
@@ -19,7 +19,7 @@ define <vscale x 4 x i32> @stepvector_nxv4i32() {
 ; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+  %0 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
   ret <vscale x 4 x i32> %0
 }
 
@@ -29,7 +29,7 @@ define <vscale x 8 x i16> @stepvector_nxv8i16() {
 ; CHECK-NEXT:    index z0.h, #0, #1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
+  %0 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
   ret <vscale x 8 x i16> %0
 }
 
@@ -39,7 +39,7 @@ define <vscale x 16 x i8> @stepvector_nxv16i8() {
 ; CHECK-NEXT:    index z0.b, #0, #1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
+  %0 = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
   ret <vscale x 16 x i8> %0
 }
 
@@ -55,7 +55,7 @@ define <vscale x 6 x i64> @stepvector_nxv6i64() {
 ; CHECK-NEXT:    incd z2.d, all, mul #2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 6 x i64> @llvm.experimental.stepvector.nxv6i64()
+  %0 = call <vscale x 6 x i64> @llvm.stepvector.nxv6i64()
   ret <vscale x 6 x i64> %0
 }
 
@@ -67,7 +67,7 @@ define <vscale x 4 x i64> @stepvector_nxv4i64() {
 ; CHECK-NEXT:    incd z1.d
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %0 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   ret <vscale x 4 x i64> %0
 }
 
@@ -83,7 +83,7 @@ define <vscale x 16 x i32> @stepvector_nxv16i32() {
 ; CHECK-NEXT:    incw z3.s, all, mul #2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+  %0 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
   ret <vscale x 16 x i32> %0
 }
 
@@ -93,7 +93,7 @@ define <vscale x 3 x i32> @stepvector_nxv3i32() {
 ; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 3 x i32> @llvm.experimental.stepvector.nxv3i32()
+  %0 = call <vscale x 3 x i32> @llvm.stepvector.nxv3i32()
   ret <vscale x 3 x i32> %0
 }
 
@@ -103,7 +103,7 @@ define <vscale x 2 x i32> @stepvector_nxv2i32() {
 ; CHECK-NEXT:    index z0.d, #0, #1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
+  %0 = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
   ret <vscale x 2 x i32> %0
 }
 
@@ -113,7 +113,7 @@ define <vscale x 4 x i16> @stepvector_nxv4i16() {
 ; CHECK-NEXT:    index z0.s, #0, #1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 4 x i16> @llvm.experimental.stepvector.nxv4i16()
+  %0 = call <vscale x 4 x i16> @llvm.stepvector.nxv4i16()
   ret <vscale x 4 x i16> %0
 }
 
@@ -123,7 +123,7 @@ define <vscale x 8 x i8> @stepvector_nxv8i8() {
 ; CHECK-NEXT:    index z0.h, #0, #1
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %0 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   ret <vscale x 8 x i8> %0
 }
 
@@ -133,8 +133,8 @@ define <vscale x 8 x i8> @add_stepvector_nxv8i8() {
 ; CHECK-NEXT:    index z0.h, #0, #2
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
-  %1 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %0 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+  %1 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %2 = add <vscale x 8 x i8> %0, %1
   ret <vscale x 8 x i8> %2
 }
@@ -146,9 +146,9 @@ define <vscale x 8 x i8> @add_stepvector_nxv8i8_1(<vscale x 8 x i8> %p) {
 ; CHECK-NEXT:    add z0.h, z0.h, z1.h
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %0 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %1 = add <vscale x 8 x i8> %p, %0
-  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %3 = add <vscale x 8 x i8> %1, %2
   ret <vscale x 8 x i8> %3
 }
@@ -161,7 +161,7 @@ define <vscale x 8 x i8> @add_stepvector_nxv8i8_2() {
 entry:
   %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
   %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %3 = add <vscale x 8 x i8> %2, %1
   ret <vscale x 8 x i8> %3
 }
@@ -174,7 +174,7 @@ define <vscale x 8 x i8> @add_stepvector_nxv8i8_2_commutative() {
 entry:
   %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
   %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %3 = add <vscale x 8 x i8> %1, %2
   ret <vscale x 8 x i8> %3
 }
@@ -187,7 +187,7 @@ define <vscale x 8 x i16> @add_stepvector_nxv8i16_1(i16 %data) {
 entry:
   %0 = insertelement <vscale x 8 x i16> poison, i16 %data, i32 0
   %1 = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
+  %2 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
   %3 = add <vscale x 8 x i16> %2, %1
   ret <vscale x 8 x i16> %3
 }
@@ -200,7 +200,7 @@ define <vscale x 4 x i32> @add_stepvector_nxv4i32_1(i32 %data) {
 entry:
   %0 = insertelement <vscale x 4 x i32> poison, i32 %data, i32 0
   %1 = shufflevector <vscale x 4 x i32> %0, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %2 = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+  %2 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
   %3 = add <vscale x 4 x i32> %2, %1
   ret <vscale x 4 x i32> %3
 }
@@ -217,7 +217,7 @@ define <vscale x 4 x i32> @multiple_use_stepvector_nxv4i32_1(i32 %data) {
 entry:
   %0 = insertelement <vscale x 4 x i32> poison, i32 %data, i32 0
   %1 = shufflevector <vscale x 4 x i32> %0, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
-  %2 = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+  %2 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
   %3 = add <vscale x 4 x i32> %2, %1
   %4 = mul <vscale x 4 x i32> %1, %3
   %5 = sub <vscale x 4 x i32> %4, %3
@@ -232,7 +232,7 @@ define <vscale x 2 x i64> @add_stepvector_nxv2i64_1(i64 %data) {
 entry:
   %0 = insertelement <vscale x 2 x i64> poison, i64 %data, i32 0
   %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %3 = add <vscale x 2 x i64> %1, %2
   ret <vscale x 2 x i64> %3
 }
@@ -249,7 +249,7 @@ define <vscale x 2 x i64> @multiple_use_stepvector_nxv2i64_1(i64 %data) {
 entry:
   %0 = insertelement <vscale x 2 x i64> poison, i64 %data, i32 0
   %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %3 = add <vscale x 2 x i64> %1, %2
   %4 = mul <vscale x 2 x i64> %3, %2
   ret <vscale x 2 x i64> %4
@@ -263,7 +263,7 @@ define <vscale x 8 x i8> @mul_stepvector_nxv8i8() {
 entry:
   %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
   %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %3 = mul <vscale x 8 x i8> %2, %1
   ret <vscale x 8 x i8> %3
 }
@@ -277,7 +277,7 @@ define <vscale x 2 x i64> @mul_stepvector_nxv2i64() {
 entry:
   %0 = insertelement <vscale x 2 x i64> poison, i64 2222, i32 0
   %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %3 = mul <vscale x 2 x i64> %2, %1
   ret <vscale x 2 x i64> %3
 }
@@ -291,7 +291,7 @@ define <vscale x 2 x i64> @mul_stepvector_bigconst_nxv2i64() {
 entry:
   %0 = insertelement <vscale x 2 x i64> poison, i64 146028888064, i32 0
   %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %3 = mul <vscale x 2 x i64> %2, %1
   ret <vscale x 2 x i64> %3
 }
@@ -305,7 +305,7 @@ define <vscale x 2 x i64> @mul_add_stepvector_nxv2i64(i64 %x) {
 entry:
   %0 = insertelement <vscale x 2 x i64> poison, i64 2222, i32 0
   %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %3 = mul <vscale x 2 x i64> %2, %1
   %4 = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
   %5 = shufflevector <vscale x 2 x i64> %4, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -321,7 +321,7 @@ define <vscale x 2 x i64> @mul_add_stepvector_nxv2i64_commutative(i64 %x, i64 %y
 entry:
   %0 = insertelement <vscale x 2 x i64> poison, i64 %y, i32 0
   %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %3 = mul <vscale x 2 x i64> %1, %2
   %4 = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
   %5 = shufflevector <vscale x 2 x i64> %4, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -338,7 +338,7 @@ define <vscale x 2 x i64> @mul_add_stepvector_bigconst_nxv2i64(i64 %x) {
 entry:
   %0 = insertelement <vscale x 2 x i64> poison, i64 146028888064, i32 0
   %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %3 = mul <vscale x 2 x i64> %2, %1
   %4 = insertelement <vscale x 2 x i64> poison, i64 %x, i32 0
   %5 = shufflevector <vscale x 2 x i64> %4, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -356,7 +356,7 @@ entry:
   %xmul = mul i64 %x, 3
   %0 = insertelement <vscale x 2 x i64> poison, i64 %xmul, i32 0
   %1 = shufflevector <vscale x 2 x i64> %0, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-  %2 = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %2 = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %3 = mul <vscale x 2 x i64> %2, %1
   %4 = insertelement <vscale x 2 x i64> poison, i64 %y, i32 0
   %5 = shufflevector <vscale x 2 x i64> %4, <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
@@ -372,7 +372,7 @@ define <vscale x 8 x i8> @shl_stepvector_nxv8i8() {
 entry:
   %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
   %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %3 = shl <vscale x 8 x i8> %2, %1
   ret <vscale x 8 x i8> %3
 }
@@ -389,7 +389,7 @@ define <vscale x 8 x i16> @sub_multiple_use_stepvector_nxv8i16() {
 entry:
   %0 = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
   %1 = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
+  %2 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
   %3 = sub <vscale x 8 x i16> %1, %2
   %4 = shl <vscale x 8 x i16> %2, %3
   ret <vscale x 8 x i16> %4
@@ -403,7 +403,7 @@ define <vscale x 8 x i16> @sub_stepvector_nxv8i16() {
 entry:
   %0 = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
   %1 = shufflevector <vscale x 8 x i16> %0, <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
+  %2 = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
   %3 = sub <vscale x 8 x i16> %1, %2
   ret <vscale x 8 x i16> %3
 }
@@ -416,7 +416,7 @@ define <vscale x 8 x i8> @promote_sub_stepvector_nxv8i8() {
 entry:
   %0 = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
   %1 = shufflevector <vscale x 8 x i8> %0, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %3 = sub <vscale x 8 x i8> %1, %2
   ret <vscale x 8 x i8> %3
 }
@@ -436,20 +436,20 @@ define <vscale x 16 x i32> @split_sub_stepvector_nxv16i32() {
 ; CHECK-NEXT:    add z3.s, z1.s, z3.s
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+  %0 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
   %1 = sub <vscale x 16 x i32> zeroinitializer, %0
   ret <vscale x 16 x i32> %1
 }
 
-declare <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-declare <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
-declare <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
+declare <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+declare <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+declare <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
+declare <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
 
-declare <vscale x 6 x i64> @llvm.experimental.stepvector.nxv6i64()
-declare <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-declare <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
-declare <vscale x 3 x i32> @llvm.experimental.stepvector.nxv3i32()
-declare <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
-declare <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
-declare <vscale x 4 x i16> @llvm.experimental.stepvector.nxv4i16()
+declare <vscale x 6 x i64> @llvm.stepvector.nxv6i64()
+declare <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+declare <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+declare <vscale x 3 x i32> @llvm.stepvector.nxv3i32()
+declare <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
+declare <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+declare <vscale x 4 x i16> @llvm.stepvector.nxv4i16()

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
index e2580c132f65e9..dcd16e093ea7e4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-stepvector.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
 
-declare <2 x i8> @llvm.experimental.stepvector.v2i8()
+declare <2 x i8> @llvm.stepvector.v2i8()
 
 define <2 x i8> @stepvector_v2i8() {
 ; CHECK-LABEL: stepvector_v2i8:
@@ -10,11 +10,11 @@ define <2 x i8> @stepvector_v2i8() {
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <2 x i8> @llvm.experimental.stepvector.v2i8()
+  %v = call <2 x i8> @llvm.stepvector.v2i8()
   ret <2 x i8> %v
 }
 
-declare <3 x i8> @llvm.experimental.stepvector.v3i8()
+declare <3 x i8> @llvm.stepvector.v3i8()
 
 define <3 x i8> @stepvector_v3i8() {
 ; CHECK-LABEL: stepvector_v3i8:
@@ -22,11 +22,11 @@ define <3 x i8> @stepvector_v3i8() {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <3 x i8> @llvm.experimental.stepvector.v3i8()
+  %v = call <3 x i8> @llvm.stepvector.v3i8()
   ret <3 x i8> %v
 }
 
-declare <4 x i8> @llvm.experimental.stepvector.v4i8()
+declare <4 x i8> @llvm.stepvector.v4i8()
 
 define <4 x i8> @stepvector_v4i8() {
 ; CHECK-LABEL: stepvector_v4i8:
@@ -34,11 +34,11 @@ define <4 x i8> @stepvector_v4i8() {
 ; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x i8> @llvm.experimental.stepvector.v4i8()
+  %v = call <4 x i8> @llvm.stepvector.v4i8()
   ret <4 x i8> %v
 }
 
-declare <8 x i8> @llvm.experimental.stepvector.v8i8()
+declare <8 x i8> @llvm.stepvector.v8i8()
 
 define <8 x i8> @stepvector_v8i8() {
 ; CHECK-LABEL: stepvector_v8i8:
@@ -46,11 +46,11 @@ define <8 x i8> @stepvector_v8i8() {
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <8 x i8> @llvm.experimental.stepvector.v8i8()
+  %v = call <8 x i8> @llvm.stepvector.v8i8()
   ret <8 x i8> %v
 }
 
-declare <16 x i8> @llvm.experimental.stepvector.v16i8()
+declare <16 x i8> @llvm.stepvector.v16i8()
 
 define <16 x i8> @stepvector_v16i8() {
 ; CHECK-LABEL: stepvector_v16i8:
@@ -58,11 +58,11 @@ define <16 x i8> @stepvector_v16i8() {
 ; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <16 x i8> @llvm.experimental.stepvector.v16i8()
+  %v = call <16 x i8> @llvm.stepvector.v16i8()
   ret <16 x i8> %v
 }
 
-declare <2 x i16> @llvm.experimental.stepvector.v2i16()
+declare <2 x i16> @llvm.stepvector.v2i16()
 
 define <2 x i16> @stepvector_v2i16() {
 ; CHECK-LABEL: stepvector_v2i16:
@@ -70,11 +70,11 @@ define <2 x i16> @stepvector_v2i16() {
 ; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <2 x i16> @llvm.experimental.stepvector.v2i16()
+  %v = call <2 x i16> @llvm.stepvector.v2i16()
   ret <2 x i16> %v
 }
 
-declare <4 x i16> @llvm.experimental.stepvector.v4i16()
+declare <4 x i16> @llvm.stepvector.v4i16()
 
 define <4 x i16> @stepvector_v4i16() {
 ; CHECK-LABEL: stepvector_v4i16:
@@ -82,11 +82,11 @@ define <4 x i16> @stepvector_v4i16() {
 ; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x i16> @llvm.experimental.stepvector.v4i16()
+  %v = call <4 x i16> @llvm.stepvector.v4i16()
   ret <4 x i16> %v
 }
 
-declare <8 x i16> @llvm.experimental.stepvector.v8i16()
+declare <8 x i16> @llvm.stepvector.v8i16()
 
 define <8 x i16> @stepvector_v8i16() {
 ; CHECK-LABEL: stepvector_v8i16:
@@ -94,11 +94,11 @@ define <8 x i16> @stepvector_v8i16() {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <8 x i16> @llvm.experimental.stepvector.v8i16()
+  %v = call <8 x i16> @llvm.stepvector.v8i16()
   ret <8 x i16> %v
 }
 
-declare <16 x i16> @llvm.experimental.stepvector.v16i16()
+declare <16 x i16> @llvm.stepvector.v16i16()
 
 define <16 x i16> @stepvector_v16i16() {
 ; CHECK-LABEL: stepvector_v16i16:
@@ -106,11 +106,11 @@ define <16 x i16> @stepvector_v16i16() {
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <16 x i16> @llvm.experimental.stepvector.v16i16()
+  %v = call <16 x i16> @llvm.stepvector.v16i16()
   ret <16 x i16> %v
 }
 
-declare <2 x i32> @llvm.experimental.stepvector.v2i32()
+declare <2 x i32> @llvm.stepvector.v2i32()
 
 define <2 x i32> @stepvector_v2i32() {
 ; CHECK-LABEL: stepvector_v2i32:
@@ -118,11 +118,11 @@ define <2 x i32> @stepvector_v2i32() {
 ; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <2 x i32> @llvm.experimental.stepvector.v2i32()
+  %v = call <2 x i32> @llvm.stepvector.v2i32()
   ret <2 x i32> %v
 }
 
-declare <4 x i32> @llvm.experimental.stepvector.v4i32()
+declare <4 x i32> @llvm.stepvector.v4i32()
 
 define <4 x i32> @stepvector_v4i32() {
 ; CHECK-LABEL: stepvector_v4i32:
@@ -130,11 +130,11 @@ define <4 x i32> @stepvector_v4i32() {
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <4 x i32> @llvm.experimental.stepvector.v4i32()
+  %v = call <4 x i32> @llvm.stepvector.v4i32()
   ret <4 x i32> %v
 }
 
-declare <8 x i32> @llvm.experimental.stepvector.v8i32()
+declare <8 x i32> @llvm.stepvector.v8i32()
 
 define <8 x i32> @stepvector_v8i32() {
 ; CHECK-LABEL: stepvector_v8i32:
@@ -142,11 +142,11 @@ define <8 x i32> @stepvector_v8i32() {
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <8 x i32> @llvm.experimental.stepvector.v8i32()
+  %v = call <8 x i32> @llvm.stepvector.v8i32()
   ret <8 x i32> %v
 }
 
-declare <16 x i32> @llvm.experimental.stepvector.v16i32()
+declare <16 x i32> @llvm.stepvector.v16i32()
 
 define <16 x i32> @stepvector_v16i32() {
 ; CHECK-LABEL: stepvector_v16i32:
@@ -154,11 +154,11 @@ define <16 x i32> @stepvector_v16i32() {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <16 x i32> @llvm.experimental.stepvector.v16i32()
+  %v = call <16 x i32> @llvm.stepvector.v16i32()
   ret <16 x i32> %v
 }
 
-declare <2 x i64> @llvm.experimental.stepvector.v2i64()
+declare <2 x i64> @llvm.stepvector.v2i64()
 
 define <2 x i64> @stepvector_v2i64() {
 ; RV32-LABEL: stepvector_v2i64:
@@ -174,11 +174,11 @@ define <2 x i64> @stepvector_v2i64() {
 ; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
 ; RV64-NEXT:    vid.v v8
 ; RV64-NEXT:    ret
-  %v = call <2 x i64> @llvm.experimental.stepvector.v2i64()
+  %v = call <2 x i64> @llvm.stepvector.v2i64()
   ret <2 x i64> %v
 }
 
-declare <4 x i64> @llvm.experimental.stepvector.v4i64()
+declare <4 x i64> @llvm.stepvector.v4i64()
 
 define <4 x i64> @stepvector_v4i64() {
 ; RV32-LABEL: stepvector_v4i64:
@@ -195,11 +195,11 @@ define <4 x i64> @stepvector_v4i64() {
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vid.v v8
 ; RV64-NEXT:    ret
-  %v = call <4 x i64> @llvm.experimental.stepvector.v4i64()
+  %v = call <4 x i64> @llvm.stepvector.v4i64()
   ret <4 x i64> %v
 }
 
-declare <8 x i64> @llvm.experimental.stepvector.v8i64()
+declare <8 x i64> @llvm.stepvector.v8i64()
 
 define <8 x i64> @stepvector_v8i64() {
 ; RV32-LABEL: stepvector_v8i64:
@@ -216,11 +216,11 @@ define <8 x i64> @stepvector_v8i64() {
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
 ; RV64-NEXT:    vid.v v8
 ; RV64-NEXT:    ret
-  %v = call <8 x i64> @llvm.experimental.stepvector.v8i64()
+  %v = call <8 x i64> @llvm.stepvector.v8i64()
   ret <8 x i64> %v
 }
 
-declare <16 x i64> @llvm.experimental.stepvector.v16i64()
+declare <16 x i64> @llvm.stepvector.v16i64()
 
 define <16 x i64> @stepvector_v16i64() {
 ; RV32-LABEL: stepvector_v16i64:
@@ -238,6 +238,6 @@ define <16 x i64> @stepvector_v16i64() {
 ; RV64-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
 ; RV64-NEXT:    vid.v v8
 ; RV64-NEXT:    ret
-  %v = call <16 x i64> @llvm.experimental.stepvector.v16i64()
+  %v = call <16 x i64> @llvm.stepvector.v16i64()
   ret <16 x i64> %v
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
index e686ac881fabe0..9ae470c7898963 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-combine.ll
@@ -56,7 +56,7 @@ define void @strided_store_zero_start(i64 %n, ptr %p) {
 ; RV64-NEXT:    li a1, 56
 ; RV64-NEXT:    vsse64.v v8, (a0), a1
 ; RV64-NEXT:    ret
-  %step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = tail call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %gep = getelementptr inbounds %struct, ptr %p, <vscale x 1 x i64> %step, i32 6
   tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
   ret void
@@ -90,7 +90,7 @@ define void @strided_store_offset_start(i64 %n, ptr %p) {
 ; RV64-NEXT:    li a1, 56
 ; RV64-NEXT:    vsse64.v v8, (a0), a1
 ; RV64-NEXT:    ret
-  %step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = tail call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %.splatinsert = insertelement <vscale x 1 x i64> poison, i64 %n, i64 0
   %.splat = shufflevector <vscale x 1 x i64> %.splatinsert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %add = add <vscale x 1 x i64> %step, %.splat
@@ -118,12 +118,12 @@ define void @stride_one_store(i64 %n, ptr %p) {
 ; RV64-NEXT:    vmv.v.i v8, 0
 ; RV64-NEXT:    vs1r.v v8, (a1)
 ; RV64-NEXT:    ret
-  %step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = tail call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %gep = getelementptr inbounds i64, ptr %p, <vscale x 1 x i64> %step
   tail call void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64> zeroinitializer, <vscale x 1 x ptr> %gep, i32 8, <vscale x 1 x i1> splat (i1 true))
   ret void
 }
 
-declare <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+declare <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
 declare void @llvm.masked.scatter.nxv2i32.nxv2p0(<vscale x 2 x i32>, <vscale x 2 x ptr>, i32, <vscale x 2 x i1>)
 declare void @llvm.masked.scatter.nxv1i64.nxv1p0(<vscale x 1 x i64>, <vscale x 1 x ptr>, i32, <vscale x 1 x i1>)

diff --git a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
index 3cb3c94d4e1f24..58fd0de31402df 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr95865.ll
@@ -169,7 +169,7 @@ define i32 @main(i1 %arg.1, i64 %arg.2, i1 %arg.3, i64 %arg.4, i1 %arg.5, <vscal
 ; CHECK-NEXT:    addi sp, sp, 112
 ; CHECK-NEXT:    ret
 entry:
-  %0 = tail call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %0 = tail call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   br label %for.cond1.preheader.i
 
 for.cond1.preheader.i:                            ; preds = %for.cond.cleanup3.i, %entry

diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index 064ea816593ac3..721f03120bd499 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV32
 ; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64
 
-declare <vscale x 1 x i8> @llvm.experimental.stepvector.nxv1i8()
+declare <vscale x 1 x i8> @llvm.stepvector.nxv1i8()
 
 define <vscale x 1 x i8> @stepvector_nxv1i8() {
 ; CHECK-LABEL: stepvector_nxv1i8:
@@ -10,11 +10,11 @@ define <vscale x 1 x i8> @stepvector_nxv1i8() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x i8> @llvm.experimental.stepvector.nxv1i8()
+  %v = call <vscale x 1 x i8> @llvm.stepvector.nxv1i8()
   ret <vscale x 1 x i8> %v
 }
 
-declare <vscale x 2 x i8> @llvm.experimental.stepvector.nxv2i8()
+declare <vscale x 2 x i8> @llvm.stepvector.nxv2i8()
 
 define <vscale x 2 x i8> @stepvector_nxv2i8() {
 ; CHECK-LABEL: stepvector_nxv2i8:
@@ -22,11 +22,11 @@ define <vscale x 2 x i8> @stepvector_nxv2i8() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i8> @llvm.experimental.stepvector.nxv2i8()
+  %v = call <vscale x 2 x i8> @llvm.stepvector.nxv2i8()
   ret <vscale x 2 x i8> %v
 }
 
-declare <vscale x 3 x i8> @llvm.experimental.stepvector.nxv3i8()
+declare <vscale x 3 x i8> @llvm.stepvector.nxv3i8()
 
 define <vscale x 3 x i8> @stepvector_nxv3i8() {
 ; CHECK-LABEL: stepvector_nxv3i8:
@@ -34,11 +34,11 @@ define <vscale x 3 x i8> @stepvector_nxv3i8() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 3 x i8> @llvm.experimental.stepvector.nxv3i8()
+  %v = call <vscale x 3 x i8> @llvm.stepvector.nxv3i8()
   ret <vscale x 3 x i8> %v
 }
 
-declare <vscale x 4 x i8> @llvm.experimental.stepvector.nxv4i8()
+declare <vscale x 4 x i8> @llvm.stepvector.nxv4i8()
 
 define <vscale x 4 x i8> @stepvector_nxv4i8() {
 ; CHECK-LABEL: stepvector_nxv4i8:
@@ -46,11 +46,11 @@ define <vscale x 4 x i8> @stepvector_nxv4i8() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x i8> @llvm.experimental.stepvector.nxv4i8()
+  %v = call <vscale x 4 x i8> @llvm.stepvector.nxv4i8()
   ret <vscale x 4 x i8> %v
 }
 
-declare <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+declare <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
 
 define <vscale x 8 x i8> @stepvector_nxv8i8() {
 ; CHECK-LABEL: stepvector_nxv8i8:
@@ -58,7 +58,7 @@ define <vscale x 8 x i8> @stepvector_nxv8i8() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %v = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   ret <vscale x 8 x i8> %v
 }
 
@@ -70,8 +70,8 @@ define <vscale x 8 x i8> @add_stepvector_nxv8i8() {
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
-  %1 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %0 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
+  %1 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %2 = add <vscale x 8 x i8> %0, %1
   ret <vscale x 8 x i8> %2
 }
@@ -85,7 +85,7 @@ define <vscale x 8 x i8> @mul_stepvector_nxv8i8() {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %3 = mul <vscale x 8 x i8> %2, splat (i8 3)
   ret <vscale x 8 x i8> %3
 }
@@ -98,12 +98,12 @@ define <vscale x 8 x i8> @shl_stepvector_nxv8i8() {
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    ret
 entry:
-  %2 = call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %2 = call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %3 = shl <vscale x 8 x i8> %2, splat (i8 2)
   ret <vscale x 8 x i8> %3
 }
 
-declare <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
+declare <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
 
 define <vscale x 16 x i8> @stepvector_nxv16i8() {
 ; CHECK-LABEL: stepvector_nxv16i8:
@@ -111,11 +111,11 @@ define <vscale x 16 x i8> @stepvector_nxv16i8() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x i8> @llvm.experimental.stepvector.nxv16i8()
+  %v = call <vscale x 16 x i8> @llvm.stepvector.nxv16i8()
   ret <vscale x 16 x i8> %v
 }
 
-declare <vscale x 32 x i8> @llvm.experimental.stepvector.nxv32i8()
+declare <vscale x 32 x i8> @llvm.stepvector.nxv32i8()
 
 define <vscale x 32 x i8> @stepvector_nxv32i8() {
 ; CHECK-LABEL: stepvector_nxv32i8:
@@ -123,11 +123,11 @@ define <vscale x 32 x i8> @stepvector_nxv32i8() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 32 x i8> @llvm.experimental.stepvector.nxv32i8()
+  %v = call <vscale x 32 x i8> @llvm.stepvector.nxv32i8()
   ret <vscale x 32 x i8> %v
 }
 
-declare <vscale x 64 x i8> @llvm.experimental.stepvector.nxv64i8()
+declare <vscale x 64 x i8> @llvm.stepvector.nxv64i8()
 
 define <vscale x 64 x i8> @stepvector_nxv64i8() {
 ; CHECK-LABEL: stepvector_nxv64i8:
@@ -135,11 +135,11 @@ define <vscale x 64 x i8> @stepvector_nxv64i8() {
 ; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 64 x i8> @llvm.experimental.stepvector.nxv64i8()
+  %v = call <vscale x 64 x i8> @llvm.stepvector.nxv64i8()
   ret <vscale x 64 x i8> %v
 }
 
-declare <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+declare <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
 
 define <vscale x 1 x i16> @stepvector_nxv1i16() {
 ; CHECK-LABEL: stepvector_nxv1i16:
@@ -147,11 +147,11 @@ define <vscale x 1 x i16> @stepvector_nxv1i16() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+  %v = call <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
   ret <vscale x 1 x i16> %v
 }
 
-declare <vscale x 2 x i16> @llvm.experimental.stepvector.nxv2i16()
+declare <vscale x 2 x i16> @llvm.stepvector.nxv2i16()
 
 define <vscale x 2 x i16> @stepvector_nxv2i16() {
 ; CHECK-LABEL: stepvector_nxv2i16:
@@ -159,11 +159,11 @@ define <vscale x 2 x i16> @stepvector_nxv2i16() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i16> @llvm.experimental.stepvector.nxv2i16()
+  %v = call <vscale x 2 x i16> @llvm.stepvector.nxv2i16()
   ret <vscale x 2 x i16> %v
 }
 
-declare <vscale x 2 x i15> @llvm.experimental.stepvector.nxv2i15()
+declare <vscale x 2 x i15> @llvm.stepvector.nxv2i15()
 
 define <vscale x 2 x i15> @stepvector_nxv2i15() {
 ; CHECK-LABEL: stepvector_nxv2i15:
@@ -171,11 +171,11 @@ define <vscale x 2 x i15> @stepvector_nxv2i15() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i15> @llvm.experimental.stepvector.nxv2i15()
+  %v = call <vscale x 2 x i15> @llvm.stepvector.nxv2i15()
   ret <vscale x 2 x i15> %v
 }
 
-declare <vscale x 3 x i16> @llvm.experimental.stepvector.nxv3i16()
+declare <vscale x 3 x i16> @llvm.stepvector.nxv3i16()
 
 define <vscale x 3 x i16> @stepvector_nxv3i16() {
 ; CHECK-LABEL: stepvector_nxv3i16:
@@ -183,11 +183,11 @@ define <vscale x 3 x i16> @stepvector_nxv3i16() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 3 x i16> @llvm.experimental.stepvector.nxv3i16()
+  %v = call <vscale x 3 x i16> @llvm.stepvector.nxv3i16()
   ret <vscale x 3 x i16> %v
 }
 
-declare <vscale x 4 x i16> @llvm.experimental.stepvector.nxv4i16()
+declare <vscale x 4 x i16> @llvm.stepvector.nxv4i16()
 
 define <vscale x 4 x i16> @stepvector_nxv4i16() {
 ; CHECK-LABEL: stepvector_nxv4i16:
@@ -195,11 +195,11 @@ define <vscale x 4 x i16> @stepvector_nxv4i16() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x i16> @llvm.experimental.stepvector.nxv4i16()
+  %v = call <vscale x 4 x i16> @llvm.stepvector.nxv4i16()
   ret <vscale x 4 x i16> %v
 }
 
-declare <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
+declare <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
 
 define <vscale x 8 x i16> @stepvector_nxv8i16() {
 ; CHECK-LABEL: stepvector_nxv8i16:
@@ -207,11 +207,11 @@ define <vscale x 8 x i16> @stepvector_nxv8i16() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x i16> @llvm.experimental.stepvector.nxv8i16()
+  %v = call <vscale x 8 x i16> @llvm.stepvector.nxv8i16()
   ret <vscale x 8 x i16> %v
 }
 
-declare <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
+declare <vscale x 16 x i16> @llvm.stepvector.nxv16i16()
 
 define <vscale x 16 x i16> @stepvector_nxv16i16() {
 ; CHECK-LABEL: stepvector_nxv16i16:
@@ -219,7 +219,7 @@ define <vscale x 16 x i16> @stepvector_nxv16i16() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
+  %v = call <vscale x 16 x i16> @llvm.stepvector.nxv16i16()
   ret <vscale x 16 x i16> %v
 }
 
@@ -231,8 +231,8 @@ define <vscale x 16 x i16> @add_stepvector_nxv16i16() {
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
-  %1 = call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
+  %0 = call <vscale x 16 x i16> @llvm.stepvector.nxv16i16()
+  %1 = call <vscale x 16 x i16> @llvm.stepvector.nxv16i16()
   %2 = add <vscale x 16 x i16> %0, %1
   ret <vscale x 16 x i16> %2
 }
@@ -246,7 +246,7 @@ define <vscale x 16 x i16> @mul_stepvector_nxv16i16() {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %2 = call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
+  %2 = call <vscale x 16 x i16> @llvm.stepvector.nxv16i16()
   %3 = mul <vscale x 16 x i16> %2, splat (i16 3)
   ret <vscale x 16 x i16> %3
 }
@@ -259,12 +259,12 @@ define <vscale x 16 x i16> @shl_stepvector_nxv16i16() {
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    ret
 entry:
-  %2 = call <vscale x 16 x i16> @llvm.experimental.stepvector.nxv16i16()
+  %2 = call <vscale x 16 x i16> @llvm.stepvector.nxv16i16()
   %3 = shl <vscale x 16 x i16> %2, splat (i16 2)
   ret <vscale x 16 x i16> %3
 }
 
-declare <vscale x 32 x i16> @llvm.experimental.stepvector.nxv32i16()
+declare <vscale x 32 x i16> @llvm.stepvector.nxv32i16()
 
 define <vscale x 32 x i16> @stepvector_nxv32i16() {
 ; CHECK-LABEL: stepvector_nxv32i16:
@@ -272,11 +272,11 @@ define <vscale x 32 x i16> @stepvector_nxv32i16() {
 ; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 32 x i16> @llvm.experimental.stepvector.nxv32i16()
+  %v = call <vscale x 32 x i16> @llvm.stepvector.nxv32i16()
   ret <vscale x 32 x i16> %v
 }
 
-declare <vscale x 1 x i32> @llvm.experimental.stepvector.nxv1i32()
+declare <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
 
 define <vscale x 1 x i32> @stepvector_nxv1i32() {
 ; CHECK-LABEL: stepvector_nxv1i32:
@@ -284,11 +284,11 @@ define <vscale x 1 x i32> @stepvector_nxv1i32() {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x i32> @llvm.experimental.stepvector.nxv1i32()
+  %v = call <vscale x 1 x i32> @llvm.stepvector.nxv1i32()
   ret <vscale x 1 x i32> %v
 }
 
-declare <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
+declare <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
 
 define <vscale x 2 x i32> @stepvector_nxv2i32() {
 ; CHECK-LABEL: stepvector_nxv2i32:
@@ -296,11 +296,11 @@ define <vscale x 2 x i32> @stepvector_nxv2i32() {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
+  %v = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
   ret <vscale x 2 x i32> %v
 }
 
-declare <vscale x 3 x i32> @llvm.experimental.stepvector.nxv3i32()
+declare <vscale x 3 x i32> @llvm.stepvector.nxv3i32()
 
 define <vscale x 3 x i32> @stepvector_nxv3i32() {
 ; CHECK-LABEL: stepvector_nxv3i32:
@@ -308,11 +308,11 @@ define <vscale x 3 x i32> @stepvector_nxv3i32() {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 3 x i32> @llvm.experimental.stepvector.nxv3i32()
+  %v = call <vscale x 3 x i32> @llvm.stepvector.nxv3i32()
   ret <vscale x 3 x i32> %v
 }
 
-declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+declare <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 
 define <vscale x 4 x i32> @stepvector_nxv4i32() {
 ; CHECK-LABEL: stepvector_nxv4i32:
@@ -320,11 +320,11 @@ define <vscale x 4 x i32> @stepvector_nxv4i32() {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+  %v = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
   ret <vscale x 4 x i32> %v
 }
 
-declare <vscale x 8 x i32> @llvm.experimental.stepvector.nxv8i32()
+declare <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
 
 define <vscale x 8 x i32> @stepvector_nxv8i32() {
 ; CHECK-LABEL: stepvector_nxv8i32:
@@ -332,11 +332,11 @@ define <vscale x 8 x i32> @stepvector_nxv8i32() {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x i32> @llvm.experimental.stepvector.nxv8i32()
+  %v = call <vscale x 8 x i32> @llvm.stepvector.nxv8i32()
   ret <vscale x 8 x i32> %v
 }
 
-declare <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+declare <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
 
 define <vscale x 16 x i32> @stepvector_nxv16i32() {
 ; CHECK-LABEL: stepvector_nxv16i32:
@@ -344,7 +344,7 @@ define <vscale x 16 x i32> @stepvector_nxv16i32() {
 ; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+  %v = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
   ret <vscale x 16 x i32> %v
 }
 
@@ -356,8 +356,8 @@ define <vscale x 16 x i32> @add_stepvector_nxv16i32() {
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
-  %1 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+  %0 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
+  %1 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
   %2 = add <vscale x 16 x i32> %0, %1
   ret <vscale x 16 x i32> %2
 }
@@ -371,7 +371,7 @@ define <vscale x 16 x i32> @mul_stepvector_nxv16i32() {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %2 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+  %2 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
   %3 = mul <vscale x 16 x i32> %2, splat (i32 3)
   ret <vscale x 16 x i32> %3
 }
@@ -384,12 +384,12 @@ define <vscale x 16 x i32> @shl_stepvector_nxv16i32() {
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    ret
 entry:
-  %2 = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+  %2 = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
   %3 = shl <vscale x 16 x i32> %2, splat (i32 2)
   ret <vscale x 16 x i32> %3
 }
 
-declare <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+declare <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
 
 define <vscale x 1 x i64> @stepvector_nxv1i64() {
 ; CHECK-LABEL: stepvector_nxv1i64:
@@ -397,11 +397,11 @@ define <vscale x 1 x i64> @stepvector_nxv1i64() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %v = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   ret <vscale x 1 x i64> %v
 }
 
-declare <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+declare <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 
 define <vscale x 2 x i64> @stepvector_nxv2i64() {
 ; CHECK-LABEL: stepvector_nxv2i64:
@@ -409,11 +409,11 @@ define <vscale x 2 x i64> @stepvector_nxv2i64() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %v = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   ret <vscale x 2 x i64> %v
 }
 
-declare <vscale x 3 x i64> @llvm.experimental.stepvector.nxv3i64()
+declare <vscale x 3 x i64> @llvm.stepvector.nxv3i64()
 
 define <vscale x 3 x i64> @stepvector_nxv3i64() {
 ; CHECK-LABEL: stepvector_nxv3i64:
@@ -421,11 +421,11 @@ define <vscale x 3 x i64> @stepvector_nxv3i64() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 3 x i64> @llvm.experimental.stepvector.nxv3i64()
+  %v = call <vscale x 3 x i64> @llvm.stepvector.nxv3i64()
   ret <vscale x 3 x i64> %v
 }
 
-declare <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+declare <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 
 define <vscale x 4 x i64> @stepvector_nxv4i64() {
 ; CHECK-LABEL: stepvector_nxv4i64:
@@ -433,11 +433,11 @@ define <vscale x 4 x i64> @stepvector_nxv4i64() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %v = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   ret <vscale x 4 x i64> %v
 }
 
-declare <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+declare <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 
 define <vscale x 8 x i64> @stepvector_nxv8i64() {
 ; CHECK-LABEL: stepvector_nxv8i64:
@@ -445,7 +445,7 @@ define <vscale x 8 x i64> @stepvector_nxv8i64() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
 ; CHECK-NEXT:    vid.v v8
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+  %v = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
   ret <vscale x 8 x i64> %v
 }
 
@@ -457,8 +457,8 @@ define <vscale x 8 x i64> @add_stepvector_nxv8i64() {
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    ret
 entry:
-  %0 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
-  %1 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+  %0 = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
+  %1 = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
   %2 = add <vscale x 8 x i64> %0, %1
   ret <vscale x 8 x i64> %2
 }
@@ -472,7 +472,7 @@ define <vscale x 8 x i64> @mul_stepvector_nxv8i64() {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    ret
 entry:
-  %2 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+  %2 = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
   %3 = mul <vscale x 8 x i64> %2, splat (i64 3)
   ret <vscale x 8 x i64> %3
 }
@@ -506,7 +506,7 @@ define <vscale x 8 x i64> @mul_bigimm_stepvector_nxv8i64() {
 ; RV64-NEXT:    vmul.vx v8, v8, a0
 ; RV64-NEXT:    ret
 entry:
-  %2 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+  %2 = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
   %3 = mul <vscale x 8 x i64> %2, splat (i64 33333333333)
   ret <vscale x 8 x i64> %3
 }
@@ -519,12 +519,12 @@ define <vscale x 8 x i64> @shl_stepvector_nxv8i64() {
 ; CHECK-NEXT:    vsll.vi v8, v8, 2
 ; CHECK-NEXT:    ret
 entry:
-  %2 = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+  %2 = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
   %3 = shl <vscale x 8 x i64> %2, splat (i64 2)
   ret <vscale x 8 x i64> %3
 }
 
-declare <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
+declare <vscale x 16 x i64> @llvm.stepvector.nxv16i64()
 
 define <vscale x 16 x i64> @stepvector_nxv16i64() {
 ; RV32-LABEL: stepvector_nxv16i64:
@@ -549,7 +549,7 @@ define <vscale x 16 x i64> @stepvector_nxv16i64() {
 ; RV64-NEXT:    vid.v v8
 ; RV64-NEXT:    vadd.vx v16, v8, a0
 ; RV64-NEXT:    ret
-  %v = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
+  %v = call <vscale x 16 x i64> @llvm.stepvector.nxv16i64()
   ret <vscale x 16 x i64> %v
 }
 
@@ -581,8 +581,8 @@ define <vscale x 16 x i64> @add_stepvector_nxv16i64() {
 ; RV64-NEXT:    vadd.vx v16, v8, a0
 ; RV64-NEXT:    ret
 entry:
-  %0 = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
-  %1 = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
+  %0 = call <vscale x 16 x i64> @llvm.stepvector.nxv16i64()
+  %1 = call <vscale x 16 x i64> @llvm.stepvector.nxv16i64()
   %2 = add <vscale x 16 x i64> %0, %1
   ret <vscale x 16 x i64> %2
 }
@@ -619,7 +619,7 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
 ; RV64-NEXT:    vadd.vx v16, v8, a0
 ; RV64-NEXT:    ret
 entry:
-  %2 = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
+  %2 = call <vscale x 16 x i64> @llvm.stepvector.nxv16i64()
   %3 = mul <vscale x 16 x i64> %2, splat (i64 3)
   ret <vscale x 16 x i64> %3
 }
@@ -673,7 +673,7 @@ define <vscale x 16 x i64> @mul_bigimm_stepvector_nxv16i64() {
 ; RV64-NEXT:    vadd.vx v16, v8, a0
 ; RV64-NEXT:    ret
 entry:
-  %2 = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
+  %2 = call <vscale x 16 x i64> @llvm.stepvector.nxv16i64()
   %3 = mul <vscale x 16 x i64> %2, splat (i64 33333333333)
   ret <vscale x 16 x i64> %3
 }
@@ -706,7 +706,7 @@ define <vscale x 16 x i64> @shl_stepvector_nxv16i64() {
 ; RV64-NEXT:    vadd.vx v16, v8, a0
 ; RV64-NEXT:    ret
 entry:
-  %2 = call <vscale x 16 x i64> @llvm.experimental.stepvector.nxv16i64()
+  %2 = call <vscale x 16 x i64> @llvm.stepvector.nxv16i64()
   %3 = shl <vscale x 16 x i64> %2, splat (i64 2)
   ret <vscale x 16 x i64> %3
 }
@@ -719,7 +719,7 @@ define <vscale x 2 x i64> @hi_bits_known_zero() vscale_range(2, 4) {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    ret
-  %step = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %step = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %and = and <vscale x 2 x i64> %step, splat (i64 u0xfffffffffffffff8)
   ret <vscale x 2 x i64> %and
 }
@@ -734,7 +734,7 @@ define <vscale x 2 x i64> @hi_bits_known_zero_overflow() vscale_range(2, 4) {
 ; CHECK-NEXT:    vmul.vx v8, v8, a0
 ; CHECK-NEXT:    vand.vi v8, v8, -8
 ; CHECK-NEXT:    ret
-  %step = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %step = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %step.mul = mul <vscale x 2 x i64> %step, splat (i64 u0xffffffffffffffff)
   %and = and <vscale x 2 x i64> %step.mul, splat (i64 u0xfffffffffffffff8)
   ret <vscale x 2 x i64> %and
@@ -747,7 +747,7 @@ define <vscale x 2 x i64> @lo_bits_known_zero() {
 ; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    ret
-  %step = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %step = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %step.mul = mul <vscale x 2 x i64> %step, splat (i64 8)
   %and = and <vscale x 2 x i64> %step.mul, splat (i64 7)
   ret <vscale x 2 x i64> %and

diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
index 381d1183995be4..7565e0af8fa5bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store.ll
@@ -3,7 +3,7 @@
 
 %struct.foo = type { i32, i32, i32, i32 }
 
-declare <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+declare <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
 
 define <vscale x 1 x i64> @gather(ptr %a, i32 %len) {
 ; CHECK-LABEL: @gather(
@@ -30,7 +30,7 @@ define <vscale x 1 x i64> @gather(ptr %a, i32 %len) {
 vector.ph:
   %wide.trip.count = zext i32 %len to i64
   %0 = tail call i64 @llvm.vscale.i64()
-  %1 = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %1 = tail call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %.splatinsert = insertelement <vscale x 1 x i64> poison, i64 %0, i64 0
   %.splat = shufflevector <vscale x 1 x i64> %.splatinsert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   br label %vector.body
@@ -74,7 +74,7 @@ define <vscale x 1 x i64> @gather_disjoint_or(ptr %a, i64 %len) {
 ;
 vector.ph:
   %vscale = call i64 @llvm.vscale.i64()
-  %step = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = tail call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %step.mul2 = shl <vscale x 1 x i64> %step, splat (i64 1)
   br label %vector.body
 
@@ -127,7 +127,7 @@ define void @scatter(ptr %a, i32 %len) {
 vector.ph:
   %wide.trip.count = zext i32 %len to i64
   %0 = tail call i64 @llvm.vscale.i64()
-  %1 = tail call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %1 = tail call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %.splatinsert = insertelement <vscale x 1 x i64> poison, i64 %0, i64 0
   %.splat = shufflevector <vscale x 1 x i64> %.splatinsert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   br label %vector.body
@@ -154,7 +154,7 @@ define <vscale x 1 x i64> @gather_loopless(ptr %p, i64 %stride) {
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> poison, i32 [[TMP2]])
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
-  %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %splat.insert = insertelement <vscale x 1 x i64> poison, i64 %stride, i64 0
   %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %offsets = mul <vscale x 1 x i64> %step, %splat
@@ -176,7 +176,7 @@ define <vscale x 1 x i64> @straightline_offset_add(ptr %p, i64 %offset) {
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> poison, i32 [[TMP2]])
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
-  %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %splat.insert = insertelement <vscale x 1 x i64> poison, i64 %offset, i64 0
   %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %offsetv = add <vscale x 1 x i64> %step, %splat
@@ -198,7 +198,7 @@ define <vscale x 1 x i64> @straightline_offset_disjoint_or(ptr %p, i64 %offset)
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> [[TMP3]], <vscale x 1 x i64> poison, i32 [[TMP2]])
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
-  %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %step.shl = shl <vscale x 1 x i64> %step, splat (i64 1)
   %offsetv = or disjoint <vscale x 1 x i64> %step.shl, splat (i64 1)
   %ptrs = getelementptr i32, ptr %p, <vscale x 1 x i64> %offsetv
@@ -218,7 +218,7 @@ define <vscale x 1 x i64> @straightline_offset_shl(ptr %p) {
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> poison, i32 [[TMP1]])
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
-  %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %offset = shl <vscale x 1 x i64> %step, splat (i64 3)
   %ptrs = getelementptr i32, ptr %p, <vscale x 1 x i64> %offset
   %x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
@@ -232,7 +232,7 @@ define <vscale x 1 x i64> @straightline_offset_shl(ptr %p) {
 
 define <vscale x 1 x i64> @neg_shl_is_not_commutative(ptr %p) {
 ; CHECK-LABEL: @neg_shl_is_not_commutative(
-; CHECK-NEXT:    [[STEP:%.*]] = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+; CHECK-NEXT:    [[STEP:%.*]] = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
 ; CHECK-NEXT:    [[SPLAT_INSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 3, i64 0
 ; CHECK-NEXT:    [[SPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[SPLAT_INSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
 ; CHECK-NEXT:    [[OFFSET:%.*]] = shl <vscale x 1 x i64> [[SPLAT]], [[STEP]]
@@ -240,7 +240,7 @@ define <vscale x 1 x i64> @neg_shl_is_not_commutative(ptr %p) {
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr> [[PTRS]], i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> poison)
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
-  %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %splat.insert = insertelement <vscale x 1 x i64> poison, i64 3, i64 0
   %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %offset = shl <vscale x 1 x i64> %splat, %step
@@ -263,7 +263,7 @@ define <vscale x 1 x i64> @straightline_offset_shl_nonc(ptr %p, i64 %shift) {
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> [[TMP4]], <vscale x 1 x i64> poison, i32 [[TMP3]])
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
-  %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %splat.insert = insertelement <vscale x 1 x i64> poison, i64 %shift, i64 0
   %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %offset = shl <vscale x 1 x i64> %step, %splat
@@ -284,7 +284,7 @@ define void @scatter_loopless(<vscale x 1 x i64> %x, ptr %p, i64 %stride) {
 ; CHECK-NEXT:    call void @llvm.experimental.vp.strided.store.nxv1i64.p0.i64(<vscale x 1 x i64> [[X:%.*]], ptr [[P:%.*]], i64 [[TMP1]], <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), i32 [[TMP2]])
 ; CHECK-NEXT:    ret void
 ;
-  %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %splat.insert = insertelement <vscale x 1 x i64> poison, i64 %stride, i64 0
   %splat = shufflevector <vscale x 1 x i64> %splat.insert, <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
   %offsets = mul <vscale x 1 x i64> %step, %splat
@@ -323,7 +323,7 @@ define <vscale x 1 x i64> @vector_base_scalar_offset(ptr %p, i64 %offset) {
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.vp.select.nxv1i64(<vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> [[TMP2]], <vscale x 1 x i64> poison, i32 [[TMP1]])
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
-  %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %ptrs1 = getelementptr i64, ptr %p, <vscale x 1 x i64> %step
   %ptrs2 = getelementptr i64, <vscale x 1 x ptr> %ptrs1, i64 %offset
   %x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(
@@ -377,13 +377,13 @@ define <vscale x 1 x i64> @nonstrided_base_scalar_offset(ptr %p, <vscale x 1 x i
 ; We shouldn't be able to determine a scalar base here.
 define <vscale x 1 x i64> @vector_base_vector_offset(ptr %p, <vscale x 1 x i64> %offset) {
 ; CHECK-LABEL: @vector_base_vector_offset(
-; CHECK-NEXT:    [[STEP:%.*]] = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+; CHECK-NEXT:    [[STEP:%.*]] = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
 ; CHECK-NEXT:    [[PTRS1:%.*]] = getelementptr i64, ptr [[P:%.*]], <vscale x 1 x i64> [[STEP]]
 ; CHECK-NEXT:    [[PTRS2:%.*]] = getelementptr i64, <vscale x 1 x ptr> [[PTRS1]], <vscale x 1 x i64> [[OFFSET:%.*]]
 ; CHECK-NEXT:    [[X:%.*]] = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(<vscale x 1 x ptr> [[PTRS2]], i32 8, <vscale x 1 x i1> shufflevector (<vscale x 1 x i1> insertelement (<vscale x 1 x i1> poison, i1 true, i64 0), <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer), <vscale x 1 x i64> poison)
 ; CHECK-NEXT:    ret <vscale x 1 x i64> [[X]]
 ;
-  %step = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+  %step = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
   %ptrs1 = getelementptr i64, ptr %p, <vscale x 1 x i64> %step
   %ptrs2 = getelementptr i64, <vscale x 1 x ptr> %ptrs1, <vscale x 1 x i64> %offset
   %x = call <vscale x 1 x i64> @llvm.masked.gather.nxv1i64.nxv1p0(

diff --git a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
index 5345bec22b9144..6f5bb3a6643991 100644
--- a/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/undef-earlyclobber-chain.ll
@@ -77,7 +77,7 @@ start:
     br i1 %0, label %Cond1, label %Cond2
 
 Cond1:                             ; preds = %start
-    %v15 = tail call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+    %v15 = tail call <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
     %v17 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15, i64 0)
     %vs12.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 1)
     %v18 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i, i64 0)
@@ -86,7 +86,7 @@ Cond1:                             ; preds = %start
     br label %UseSR
 
 Cond2:                           ; preds = %start
-    %v15.2 = tail call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+    %v15.2 = tail call <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
     %v17.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15.2, i64 1)
     %vs12.i.i.i.2 = add <vscale x 1 x i16> %v15.2, splat (i16 1)
     %v18.2 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i.2, i64 1)
@@ -130,7 +130,7 @@ define internal void @SubRegLivenessUndef() {
 ; CHECK-NEXT:    vs1r.v v9, (zero)
 ; CHECK-NEXT:    j .LBB3_1
 loopIR.preheader.i.i:
-  %v15 = tail call <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+  %v15 = tail call <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
   %v17 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %v15, i64 0)
   %vs12.i.i.i = add <vscale x 1 x i16> %v15, splat (i16 1)
   %v18 = tail call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16> poison, <vscale x 1 x i16> %vs12.i.i.i, i64 0)
@@ -153,7 +153,7 @@ declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture)
 declare <vscale x 2 x float> @llvm.riscv.vrgather.vx.nxv2f32.i64(<vscale x 2 x float>, <vscale x 2 x float>, i64, i64) #2
 declare void @llvm.riscv.vse.nxv2f32.i64(<vscale x 2 x float>, ptr nocapture, i64)
 declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture)
-declare <vscale x 1 x i16> @llvm.experimental.stepvector.nxv1i16()
+declare <vscale x 1 x i16> @llvm.stepvector.nxv1i16()
 declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.nxv1i16(<vscale x 8 x i16>, <vscale x 1 x i16>, i64 immarg)
 declare <vscale x 8 x i8> @llvm.riscv.vrgatherei16.vv.nxv8i8.i64(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i16>, i64)
 

diff --git a/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll b/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll
index d010efc64e96f9..fd725e555a3260 100644
--- a/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll
+++ b/llvm/test/CodeGen/RISCV/vscale-demanded-bits.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple riscv64 -mattr +v -filetype asm -o - %s | FileCheck %s
 
 declare i8 @llvm.vscale.i8()
-declare <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+declare <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
 
 define <vscale x 8 x i8> @f() #0 {
 ; CHECK-LABEL: f:
@@ -17,7 +17,7 @@ entry:
   %1 = shl i8 %0, 3
   %.splat.insert = insertelement <vscale x 8 x i8> poison, i8 %1, i64 0
   %.splat = shufflevector <vscale x 8 x i8> %.splat.insert, <vscale x 8 x i8> poison, <vscale x 8 x i32> zeroinitializer
-  %2 = tail call <vscale x 8 x i8> @llvm.experimental.stepvector.nxv8i8()
+  %2 = tail call <vscale x 8 x i8> @llvm.stepvector.nxv8i8()
   %3 = add <vscale x 8 x i8> %2, %.splat
   ret <vscale x 8 x i8> %3
 }

diff --git a/llvm/test/Transforms/InstCombine/vscale_extractelement.ll b/llvm/test/Transforms/InstCombine/vscale_extractelement.ll
index f696ff469e205a..07090e9099ae15 100644
--- a/llvm/test/Transforms/InstCombine/vscale_extractelement.ll
+++ b/llvm/test/Transforms/InstCombine/vscale_extractelement.ll
@@ -162,7 +162,7 @@ define i64 @ext_lane0_from_stepvec() {
 ; CHECK-NEXT:    ret i64 0
 ;
 entry:
-  %0 = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %0 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %1 = extractelement <vscale x 4 x i64> %0, i32 0
   ret i64 %1
 }
@@ -173,7 +173,7 @@ define i32 @ext_lane3_from_stepvec() {
 ; CHECK-NEXT:    ret i32 3
 ;
 entry:
-  %0 = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+  %0 = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
   %1 = extractelement <vscale x 4 x i32> %0, i64 3
   ret i32 %1
 }
@@ -181,12 +181,12 @@ entry:
 define i64 @ext_lane_out_of_range_from_stepvec() {
 ; CHECK-LABEL: @ext_lane_out_of_range_from_stepvec(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 4 x i64> [[TMP0]], i64 4
 ; CHECK-NEXT:    ret i64 [[TMP1]]
 ;
 entry:
-  %0 = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %0 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %1 = extractelement <vscale x 4 x i64> %0, i32 4
   ret i64 %1
 }
@@ -194,12 +194,12 @@ entry:
 define i64 @ext_lane_invalid_from_stepvec() {
 ; CHECK-LABEL: @ext_lane_invalid_from_stepvec(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 4 x i64> [[TMP0]], i64 4294967295
 ; CHECK-NEXT:    ret i64 [[TMP1]]
 ;
 entry:
-  %0 = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %0 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %1 = extractelement <vscale x 4 x i64> %0, i32 -1
   ret i64 %1
 }
@@ -207,12 +207,12 @@ entry:
 define i64 @ext_lane_unknown_from_stepvec(i32 %v) {
 ; CHECK-LABEL: @ext_lane_unknown_from_stepvec(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 4 x i64> [[TMP0]], i32 [[V:%.*]]
 ; CHECK-NEXT:    ret i64 [[TMP1]]
 ;
 entry:
-  %0 = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+  %0 = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
   %1 = extractelement <vscale x 4 x i64> %0, i32 %v
   ret i64 %1
 }
@@ -225,7 +225,7 @@ define i8 @ext_lane256_from_stepvec() {
 ; CHECK-NEXT:    ret i8 poison
 ;
 entry:
-  %0 = call <vscale x 512 x i8> @llvm.experimental.stepvector.nxv512i8()
+  %0 = call <vscale x 512 x i8> @llvm.stepvector.nxv512i8()
   %1 = extractelement <vscale x 512 x i8> %0, i64 256
   ret i8 %1
 }
@@ -236,7 +236,7 @@ define i8 @ext_lane255_from_stepvec() {
 ; CHECK-NEXT:    ret i8 -1
 ;
 entry:
-  %0 = call <vscale x 512 x i8> @llvm.experimental.stepvector.nxv512i8()
+  %0 = call <vscale x 512 x i8> @llvm.stepvector.nxv512i8()
   %1 = extractelement <vscale x 512 x i8> %0, i64 255
   ret i8 %1
 }
@@ -250,7 +250,7 @@ define i64 @ext_lane0_from_add_with_stepvec(i64 %i) {
 ;
   %tmp = insertelement <vscale x 2 x i64> poison, i64 %i, i32 0
   %splatofi = shufflevector <vscale x 2 x i64> %tmp, <vscale x 2 x i64> poison, <vscale x  2 x i32> zeroinitializer
-  %stepvec = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %stepvec = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %add = add <vscale x 2 x i64> %splatofi, %stepvec
   %res = extractelement <vscale x 2 x i64> %add, i32 0
   ret i64 %res
@@ -263,7 +263,7 @@ define i1 @ext_lane1_from_cmp_with_stepvec(i64 %i) {
 ;
   %tmp = insertelement <vscale x 2 x i64> poison, i64 %i, i32 0
   %splatofi = shufflevector <vscale x 2 x i64> %tmp, <vscale x 2 x i64> poison, <vscale x  2 x i32> zeroinitializer
-  %stepvec = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+  %stepvec = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
   %cmp = icmp eq <vscale x 2 x i64> %splatofi, %stepvec
   %res = extractelement <vscale x 2 x i1> %cmp, i32 1
   ret i1 %res
@@ -282,7 +282,7 @@ entry:
   ret ptr %r
 }
 
-declare <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
-declare <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
-declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
-declare <vscale x 512 x i8> @llvm.experimental.stepvector.nxv512i8()
+declare <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
+declare <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
+declare <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
+declare <vscale x 512 x i8> @llvm.stepvector.nxv512i8()

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
index 9d8f187aea0285..290fca8269cb2d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/clamped-trip-count.ll
@@ -17,7 +17,7 @@ define void @clamped_tc_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range(1,1
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 4
 ; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 8)
-; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = add <vscale x 4 x i64> [[TMP7]], zeroinitializer
 ; CHECK-NEXT:    [[TMP9:%.*]] = mul <vscale x 4 x i64> [[TMP8]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP9]]
@@ -108,7 +108,7 @@ define void @clamped_tc_max_8(ptr nocapture %dst, i32 %n, i64 %val) vscale_range
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 4
 ; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[WIDE_TRIP_COUNT]])
-; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = add <vscale x 4 x i64> [[TMP7]], zeroinitializer
 ; CHECK-NEXT:    [[TMP9:%.*]] = mul <vscale x 4 x i64> [[TMP8]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP9]]

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll b/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
index bce2d6c14d8668..d5a0b3669502ac 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/divs-with-scalable-vfs.ll
@@ -138,7 +138,7 @@ define void @sdiv_feeding_gep_predicated(ptr %dst, i32 %x, i64 %M, i64 %conv6, i
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ugt i64 [[N]], [[TMP11]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
 ; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[N]])
-; CHECK-NEXT:    [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT:    [[TMP16:%.*]] = add <vscale x 2 x i64> [[TMP15]], zeroinitializer
 ; CHECK-NEXT:    [[TMP17:%.*]] = mul <vscale x 2 x i64> [[TMP16]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP17]]
@@ -267,7 +267,7 @@ define void @udiv_urem_feeding_gep(i64 %x, ptr %dst, i64 %N) {
 ; CHECK-NEXT:    [[TMP13:%.*]] = icmp ugt i64 [[TMP0]], [[TMP11]]
 ; CHECK-NEXT:    [[TMP14:%.*]] = select i1 [[TMP13]], i64 [[TMP12]], i64 0
 ; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 0, i64 [[TMP0]])
-; CHECK-NEXT:    [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[TMP15:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT:    [[TMP16:%.*]] = add <vscale x 2 x i64> [[TMP15]], zeroinitializer
 ; CHECK-NEXT:    [[TMP17:%.*]] = mul <vscale x 2 x i64> [[TMP16]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP17]]

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll b/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll
index 736be4995c575f..78a1b7169829fd 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/outer_loop_prefer_scalable.ll
@@ -20,7 +20,7 @@ define void @foo() {
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
 ; CHECK-NEXT:    [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP19:%.*]] = mul i64 [[TMP18]], 4
-; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = add <vscale x 4 x i64> [[TMP4]], zeroinitializer
 ; CHECK-NEXT:    [[TMP6:%.*]] = mul <vscale x 4 x i64> [[TMP5]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP6]]

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
index 9bb94173985263..9e54f944e423a2 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/pr60831-sve-inv-store-crash.ll
@@ -22,7 +22,7 @@ define void @test_invar_gep(ptr %dst) #0 {
 ; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; CHECK:       vector.body:
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[INDEX]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
 ; CHECK-NEXT:    [[TMP7:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP6]]

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
index a3c108bca140bf..7fb9f39ca1b008 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/scalable-avoid-scalarization.ll
@@ -28,7 +28,7 @@ define void @test_no_scalarization(ptr %a, ptr noalias %b, i32 %idx, i32 %n) #0
 ; CHECK-NEXT:    [[TMP7:%.*]] = mul i32 [[TMP6]], 2
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i32> poison, i32 [[IDX]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i32> [[DOTSPLATINSERT]], <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 2 x i32> @llvm.experimental.stepvector.nxv2i32()
+; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 2 x i32> @llvm.stepvector.nxv2i32()
 ; CHECK-NEXT:    [[TMP9:%.*]] = add <vscale x 2 x i32> [[TMP8]], zeroinitializer
 ; CHECK-NEXT:    [[TMP10:%.*]] = mul <vscale x 2 x i32> [[TMP9]], shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1, i64 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i32> [[DOTSPLAT]], [[TMP10]]

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
index dac64c3d0f58d0..25f4a7ac2e9aa4 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-gather-scatter.ll
@@ -294,7 +294,7 @@ define void @gather_nxv4i32_ind64_stride2(ptr noalias nocapture %a, ptr noalias
 ; CHECK-NEXT:    [[N_VEC:%.*]] = and i64 [[N]], [[DOTNEG]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 3
-; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 2
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP7]], i64 0

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll
index 812af1a102083f..fec0c9200b5e80 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions-unusual-types.ll
@@ -19,7 +19,7 @@ define void @induction_i7(ptr %dst) #0 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = trunc i64 [[N_VEC]] to i7
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i8> @llvm.experimental.stepvector.nxv2i8()
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i8> @llvm.stepvector.nxv2i8()
 ; CHECK-NEXT:    [[TMP7:%.*]] = trunc <vscale x 2 x i8> [[TMP6]] to <vscale x 2 x i7>
 ; CHECK-NEXT:    [[TMP8:%.*]] = add <vscale x 2 x i7> [[TMP7]], zeroinitializer
 ; CHECK-NEXT:    [[TMP9:%.*]] = mul <vscale x 2 x i7> [[TMP8]], shufflevector (<vscale x 2 x i7> insertelement (<vscale x 2 x i7> poison, i7 1, i64 0), <vscale x 2 x i7> poison, <vscale x 2 x i32> zeroinitializer)
@@ -93,7 +93,7 @@ define void @induction_i3_zext(ptr %dst) #0 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = trunc i64 [[N_VEC]] to i3
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i8> @llvm.experimental.stepvector.nxv2i8()
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i8> @llvm.stepvector.nxv2i8()
 ; CHECK-NEXT:    [[TMP7:%.*]] = trunc <vscale x 2 x i8> [[TMP6]] to <vscale x 2 x i3>
 ; CHECK-NEXT:    [[TMP8:%.*]] = add <vscale x 2 x i3> [[TMP7]], zeroinitializer
 ; CHECK-NEXT:    [[TMP9:%.*]] = mul <vscale x 2 x i3> [[TMP8]], shufflevector (<vscale x 2 x i3> insertelement (<vscale x 2 x i3> poison, i3 1, i64 0), <vscale x 2 x i3> poison, <vscale x 2 x i32> zeroinitializer)

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
index 34fb5bb640471f..6b7f29e68a251d 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-inductions.ll
@@ -25,7 +25,7 @@ define void @cond_ind64(ptr noalias nocapture %a, ptr noalias nocapture readonly
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shl i64 [[TMP4]], 2
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = shl i64 [[TMP7]], 2
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP8]], i64 0

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
index ba8f69b63f0607..9641dd7d21fd2a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-accesses.ll
@@ -108,7 +108,7 @@ define void @test_array_load2_i16_store2(i32 %C, i32 %D) #1 {
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl <vscale x 4 x i64> [[TMP2]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 3
@@ -199,7 +199,7 @@ define void @test_array_load2_store2_i16(i32 noundef %C, i32 noundef %D) #1 {
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl <vscale x 4 x i64> [[TMP2]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shl nuw nsw i64 [[TMP4]], 3
@@ -279,7 +279,7 @@ define i32 @test_struct_load6(ptr %S) #1 {
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP4:%.*]] = shl nuw nsw i64 [[TMP3]], 2
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP4]], i64 0
@@ -383,7 +383,7 @@ define void @test_reversed_load2_store2(ptr noalias nocapture readonly %A, ptr n
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = sub <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1023, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), [[TMP2]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i32 @llvm.vscale.i32()
 ; CHECK-NEXT:    [[DOTNEG:%.*]] = mul nsw i32 [[TMP3]], -4
@@ -642,7 +642,7 @@ define void @load_gap_reverse(ptr noalias nocapture readonly %P1, ptr noalias no
 ; CHECK:       vector.ph:
 ; CHECK-NEXT:    [[TMP0:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
-; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = sub <vscale x 4 x i64> shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1023, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer), [[TMP2]]
 ; CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[DOTNEG:%.*]] = mul nsw i64 [[TMP3]], -4
@@ -894,7 +894,7 @@ define void @PR27626_0(ptr %p, i32 %z, i64 %n) #1 {
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
-; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 2
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
@@ -981,7 +981,7 @@ define i32 @PR27626_1(ptr %p, i64 %n) #1 {
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
-; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 2
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
@@ -1076,7 +1076,7 @@ define void @PR27626_2(ptr %p, i64 %n, i32 %z) #1 {
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
-; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 2
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
@@ -1166,7 +1166,7 @@ define i32 @PR27626_3(ptr %p, i64 %n, i32 %z) #1 {
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub nsw i64 [[SMAX]], [[TMP6]]
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
-; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 2
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP11]], i64 0
@@ -1269,7 +1269,7 @@ define void @PR27626_4(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = shl nuw i64 [[N_VEC]], 1
 ; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shl nuw nsw i64 [[TMP6]], 2
-; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl <vscale x 4 x i64> [[TMP8]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP11:%.*]] = shl nuw nsw i64 [[TMP10]], 3
@@ -1365,7 +1365,7 @@ define void @PR27626_5(ptr %a, i32 %x, i32 %y, i32 %z, i64 %n) #1 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = or disjoint i64 [[TMP6]], 3
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 2
-; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP10:%.*]] = shl <vscale x 4 x i64> [[TMP9]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> [[TMP10]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 3, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP11:%.*]] = call i64 @llvm.vscale.i64()
@@ -1479,7 +1479,7 @@ define void @PR34743(ptr %a, ptr %b, i64 %n) #1 {
 ; CHECK-NEXT:    [[TMP12:%.*]] = shl nuw nsw i32 [[TMP11]], 2
 ; CHECK-NEXT:    [[TMP13:%.*]] = add nsw i32 [[TMP12]], -1
 ; CHECK-NEXT:    [[VECTOR_RECUR_INIT:%.*]] = insertelement <vscale x 4 x i16> poison, i16 [[DOTPRE]], i32 [[TMP13]]
-; CHECK-NEXT:    [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP15:%.*]] = shl <vscale x 4 x i64> [[TMP14]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP17:%.*]] = shl nuw nsw i64 [[TMP16]], 3

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
index 726d98f4d37d39..38bccf87a458ae 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-interleaved-masked-accesses.ll
@@ -37,7 +37,7 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no
 ; SCALAR_TAIL_FOLDING-NEXT:    [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP19:%.*]] = call i32 @llvm.vscale.i32()
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP20:%.*]] = shl i32 [[TMP19]], 4
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vscale.i32()
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP6:%.*]] = shl i32 [[TMP5]], 4
 ; SCALAR_TAIL_FOLDING-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP6]], i64 0
@@ -117,7 +117,7 @@ define dso_local void @masked_strided1(ptr noalias nocapture readonly %p, ptr no
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 4
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP2:%.*]] = call i32 @llvm.usub.sat.i32(i32 1024, i32 [[TMP1]])
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
-; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP5:%.*]] = shl i32 [[TMP4]], 4
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
@@ -229,7 +229,7 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
 ; SCALAR_TAIL_FOLDING-NEXT:    [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP14:%.*]] = call i32 @llvm.vscale.i32()
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP15:%.*]] = shl i32 [[TMP14]], 4
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vscale.i32()
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP6:%.*]] = shl i32 [[TMP5]], 4
 ; SCALAR_TAIL_FOLDING-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP6]], i64 0
@@ -292,7 +292,7 @@ define dso_local void @masked_strided2(ptr noalias nocapture readnone %p, ptr no
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 4
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP2:%.*]] = call i32 @llvm.usub.sat.i32(i32 1024, i32 [[TMP1]])
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
-; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP5:%.*]] = shl i32 [[TMP4]], 4
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0
@@ -391,7 +391,7 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
 ; SCALAR_TAIL_FOLDING-NEXT:    [[N_VEC:%.*]] = sub nuw nsw i32 1024, [[N_MOD_VF]]
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP15:%.*]] = call i32 @llvm.vscale.i32()
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP16:%.*]] = shl i32 [[TMP15]], 4
-; SCALAR_TAIL_FOLDING-NEXT:    [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+; SCALAR_TAIL_FOLDING-NEXT:    [[TMP4:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP5:%.*]] = call i32 @llvm.vscale.i32()
 ; SCALAR_TAIL_FOLDING-NEXT:    [[TMP6:%.*]] = shl i32 [[TMP5]], 4
 ; SCALAR_TAIL_FOLDING-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP6]], i64 0
@@ -463,7 +463,7 @@ define dso_local void @masked_strided3(ptr noalias nocapture readnone %p, ptr no
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP1:%.*]] = shl i32 [[TMP0]], 4
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP2:%.*]] = call i32 @llvm.usub.sat.i32(i32 1024, i32 [[TMP1]])
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 16 x i1> @llvm.get.active.lane.mask.nxv16i1.i32(i32 0, i32 1024)
-; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.experimental.stepvector.nxv16i32()
+; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i32> @llvm.stepvector.nxv16i32()
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP4:%.*]] = call i32 @llvm.vscale.i32()
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[TMP5:%.*]] = shl i32 [[TMP4]], 4
 ; PREDICATED_TAIL_FOLDING-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i32> poison, i32 [[TMP5]], i64 0

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
index 8500f5fc194c65..f6a6d021f03c9f 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
@@ -224,7 +224,7 @@ define void @copy_stride4(ptr noalias %dst, ptr noalias %src, i64 %n) #0 {
 ; CHECK-NEXT:    [[TMP11:%.*]] = icmp ugt i64 [[TMP2]], [[TMP9]]
 ; CHECK-NEXT:    [[TMP12:%.*]] = select i1 [[TMP11]], i64 [[TMP10]], i64 0
 ; CHECK-NEXT:    [[ACTIVE_LANE_MASK_ENTRY:%.*]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 0, i64 [[TMP2]])
-; CHECK-NEXT:    [[TMP13:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP13:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP14:%.*]] = add <vscale x 4 x i64> [[TMP13]], zeroinitializer
 ; CHECK-NEXT:    [[TMP15:%.*]] = mul <vscale x 4 x i64> [[TMP14]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 4, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP15]]

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
index 3bab341e1c248a..a1f6ba487e84e0 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-gep.ll
@@ -68,7 +68,7 @@ define void @pointer_induction_used_as_vector(ptr noalias %start.1, ptr noalias
 ; CHECK-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP8]], 0
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP11]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP12:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[TMP12:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT:    [[TMP13:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP12]]
 ; CHECK-NEXT:    [[VECTOR_GEP:%.*]] = mul <vscale x 2 x i64> [[TMP13]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP14:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
@@ -161,7 +161,7 @@ define void @pointer_induction(ptr noalias %start, i64 %N) {
 ; CHECK-NEXT:    [[TMP9:%.*]] = mul i64 [[TMP6]], 0
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP9]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP10:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[TMP10:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT:    [[TMP11:%.*]] = add <vscale x 2 x i64> [[DOTSPLAT]], [[TMP10]]
 ; CHECK-NEXT:    [[VECTOR_GEP:%.*]] = mul <vscale x 2 x i64> [[TMP11]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP12:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
index 626bb55cf2a77e..123b3cf3df14d5 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-widen-phi.ll
@@ -242,7 +242,7 @@ define i32 @pointer_iv_mixed(ptr noalias %a, ptr noalias %b, i64 %n) #0 {
 ; CHECK-NEXT:    [[VEC_PHI:%.*]] = phi <vscale x 2 x i32> [ zeroinitializer, [[VECTOR_PH]] ], [ [[TMP12:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = shl nuw nsw i64 [[TMP7]], 3
-; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT:    [[VECTOR_GEP:%.*]] = shl <vscale x 2 x i64> [[TMP9]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 2, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP10:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
 ; CHECK-NEXT:    [[OFFSET_IDX:%.*]] = shl i64 [[INDEX]], 3
@@ -317,7 +317,7 @@ define void @phi_used_in_vector_compare_and_scalar_indvar_update_and_store(ptr %
 ; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
 ; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP3:%.*]] = shl nuw nsw i64 [[TMP2]], 2
-; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT:    [[VECTOR_GEP:%.*]] = shl <vscale x 2 x i64> [[TMP4]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP5:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 2 x i64> [[VECTOR_GEP]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = icmp ne <vscale x 2 x ptr> [[TMP5]], zeroinitializer

diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
index def912a6d72cfb..3f67b386cba97a 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/tail-folding-styles.ll
@@ -125,7 +125,7 @@ define void @simple_memset_tailfold(i32 %val, ptr %ptr, i64 %n) "target-features
 ; DATA_NO_LANEMASK-NEXT:    [[TMP9:%.*]] = add i64 [[INDEX1]], 0
 ; DATA_NO_LANEMASK-NEXT:    [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[INDEX1]], i64 0
 ; DATA_NO_LANEMASK-NEXT:    [[BROADCAST_SPLAT3:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT2]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; DATA_NO_LANEMASK-NEXT:    [[TMP10:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; DATA_NO_LANEMASK-NEXT:    [[TMP10:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; DATA_NO_LANEMASK-NEXT:    [[TMP11:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
 ; DATA_NO_LANEMASK-NEXT:    [[VEC_IV:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT3]], [[TMP11]]
 ; DATA_NO_LANEMASK-NEXT:    [[TMP12:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT]]

diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
index b629b964a70c04..e35591b6f14d44 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/blocks-with-dead-instructions.ll
@@ -25,7 +25,7 @@ define void @block_with_dead_inst_1(ptr %src, i64 %N) #0 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
 ; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 8
-; CHECK-NEXT:    [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT:    [[TMP12:%.*]] = add <vscale x 8 x i64> [[TMP11]], zeroinitializer
 ; CHECK-NEXT:    [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP12]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
@@ -122,7 +122,7 @@ define void @block_with_dead_inst_2(ptr %src) #0 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
 ; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP6]], 4
-; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP9:%.*]] = add <vscale x 4 x i64> [[TMP8]], zeroinitializer
 ; CHECK-NEXT:    [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP9]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 3, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
@@ -219,7 +219,7 @@ define void @multiple_blocks_with_dead_insts_3(ptr %src) #0 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
 ; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP6]], 4
-; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP9:%.*]] = add <vscale x 4 x i64> [[TMP8]], zeroinitializer
 ; CHECK-NEXT:    [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP9]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 3, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
@@ -326,7 +326,7 @@ define void @multiple_blocks_with_dead_insts_4(ptr %src, i64 %N) #0 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
 ; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 8
-; CHECK-NEXT:    [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT:    [[TMP12:%.*]] = add <vscale x 8 x i64> [[TMP11]], zeroinitializer
 ; CHECK-NEXT:    [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP12]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
@@ -435,7 +435,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_5(ptr %src) #0 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
 ; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP6]], 4
-; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP9:%.*]] = add <vscale x 4 x i64> [[TMP8]], zeroinitializer
 ; CHECK-NEXT:    [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP9]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 3, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
@@ -552,7 +552,7 @@ define void @multiple_blocks_with_dead_inst_multiple_successors_6(ptr %src, i1 %
 ; CHECK-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
 ; CHECK-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 8
-; CHECK-NEXT:    [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+; CHECK-NEXT:    [[TMP11:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT:    [[TMP12:%.*]] = add <vscale x 8 x i64> [[TMP11]], zeroinitializer
 ; CHECK-NEXT:    [[TMP13:%.*]] = mul <vscale x 8 x i64> [[TMP12]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP13]]
@@ -872,7 +872,7 @@ define void @dead_load_in_block(ptr %dst, ptr %src, i8 %N, i64 %x) #0 {
 ; CHECK-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 3
 ; CHECK-NEXT:    [[TMP16:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP17:%.*]] = mul i64 [[TMP16]], 2
-; CHECK-NEXT:    [[TMP18:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[TMP18:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT:    [[TMP19:%.*]] = add <vscale x 2 x i64> [[TMP18]], zeroinitializer
 ; CHECK-NEXT:    [[TMP20:%.*]] = mul <vscale x 2 x i64> [[TMP19]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 3, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP20]]

diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
index da5db810ec6389..0697c1b6386e23 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/dead-ops-cost.ll
@@ -35,7 +35,7 @@ define void @dead_load(ptr %p, i16 %start) {
 ; CHECK-NEXT:    [[TMP14:%.*]] = mul i64 [[TMP13]], 8
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i64> poison, i64 [[START_EXT]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i64> [[DOTSPLATINSERT]], <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP15:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+; CHECK-NEXT:    [[TMP15:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT:    [[TMP16:%.*]] = add <vscale x 8 x i64> [[TMP15]], zeroinitializer
 ; CHECK-NEXT:    [[TMP17:%.*]] = mul <vscale x 8 x i64> [[TMP16]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 3, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 8 x i64> [[DOTSPLAT]], [[TMP17]]
@@ -117,7 +117,7 @@ define i8 @dead_live_out_due_to_scalar_epilogue_required(ptr %src, ptr %dst) {
 ; CHECK-NEXT:    [[IND_END:%.*]] = mul i32 [[N_VEC]], 4
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i32 @llvm.vscale.i32()
 ; CHECK-NEXT:    [[TMP8:%.*]] = mul i32 [[TMP7]], 4
-; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; CHECK-NEXT:    [[TMP9:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; CHECK-NEXT:    [[TMP10:%.*]] = add <vscale x 4 x i32> [[TMP9]], zeroinitializer
 ; CHECK-NEXT:    [[TMP11:%.*]] = mul <vscale x 4 x i32> [[TMP10]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 4, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP11]]

diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
index 1307d57f32bc12..14d15d4307af67 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/mask-index-type.ll
@@ -22,7 +22,7 @@ define void @test(ptr noalias nocapture %a, ptr noalias nocapture %b, i32 %v) {
 ; VLENUNK-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
 ; VLENUNK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; VLENUNK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4
-; VLENUNK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; VLENUNK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; VLENUNK-NEXT:    [[TMP7:%.*]] = add <vscale x 4 x i64> [[TMP6]], zeroinitializer
 ; VLENUNK-NEXT:    [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP7]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; VLENUNK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]]

diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
index a151232df0cd58..d01743adaae172 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/masked_gather_scatter.ll
@@ -41,7 +41,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
 ; RV32-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 16
 ; RV32-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
 ; RV32-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 2
-; RV32-NEXT:    [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; RV32-NEXT:    [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; RV32-NEXT:    [[TMP8:%.*]] = add <vscale x 2 x i64> [[TMP7]], zeroinitializer
 ; RV32-NEXT:    [[TMP9:%.*]] = mul <vscale x 2 x i64> [[TMP8]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 16, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; RV32-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP9]]
@@ -123,7 +123,7 @@ define void @foo4(ptr nocapture %A, ptr nocapture readonly %B, ptr nocapture rea
 ; RV64-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 16
 ; RV64-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
 ; RV64-NEXT:    [[TMP6:%.*]] = mul i64 [[TMP5]], 2
-; RV64-NEXT:    [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; RV64-NEXT:    [[TMP7:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; RV64-NEXT:    [[TMP8:%.*]] = add <vscale x 2 x i64> [[TMP7]], zeroinitializer
 ; RV64-NEXT:    [[TMP9:%.*]] = mul <vscale x 2 x i64> [[TMP8]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 16, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; RV64-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP9]]

diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
index 5f814160037147..46ec78c5e8488d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/pr87378-vpinstruction-or-drop-poison-generating-flags.ll
@@ -20,7 +20,7 @@ define void @pr87378_vpinstruction_or_drop_poison_generating_flags(ptr %arg, i64
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 1001, [[N_MOD_VF]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 8
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 8 x i64> @llvm.experimental.stepvector.nxv8i64()
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 8 x i64> @llvm.stepvector.nxv8i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = add <vscale x 8 x i64> [[TMP6]], zeroinitializer
 ; CHECK-NEXT:    [[TMP8:%.*]] = mul <vscale x 8 x i64> [[TMP7]], shufflevector (<vscale x 8 x i64> insertelement (<vscale x 8 x i64> poison, i64 1, i64 0), <vscale x 8 x i64> poison, <vscale x 8 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 8 x i64> zeroinitializer, [[TMP8]]

diff --git a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
index 6936887cd166c3..4e4163997b1c19 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/strided-accesses.ll
@@ -19,7 +19,7 @@ define void @single_constant_stride_int_scaled(ptr %p) {
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[TMP5]]
 ; CHECK-NEXT:    [[TMP6:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = mul i64 [[TMP6]], 4
-; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP8:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP9:%.*]] = add <vscale x 4 x i64> [[TMP8]], zeroinitializer
 ; CHECK-NEXT:    [[TMP10:%.*]] = mul <vscale x 4 x i64> [[TMP9]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP10]]
@@ -92,7 +92,7 @@ define void @single_constant_stride_int_iv(ptr %p) {
 ; CHECK-NEXT:    [[IND_END:%.*]] = mul i64 [[N_VEC]], 64
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = add <vscale x 4 x i64> [[TMP6]], zeroinitializer
 ; CHECK-NEXT:    [[TMP8:%.*]] = mul <vscale x 4 x i64> [[TMP7]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 64, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP8]]
@@ -183,7 +183,7 @@ define void @single_constant_stride_ptr_iv(ptr %p) {
 ; CHECK-NEXT:    [[TMP13:%.*]] = mul i64 [[TMP10]], 0
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP13]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; CHECK-NEXT:    [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP15:%.*]] = add <vscale x 4 x i64> [[DOTSPLAT]], [[TMP14]]
 ; CHECK-NEXT:    [[VECTOR_GEP:%.*]] = mul <vscale x 4 x i64> [[TMP15]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 8, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP16:%.*]] = getelementptr i8, ptr [[POINTER_PHI]], <vscale x 4 x i64> [[VECTOR_GEP]]
@@ -536,7 +536,7 @@ define void @double_stride_int_scaled(ptr %p, ptr %p2, i64 %stride) {
 ; STRIDED-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
 ; STRIDED-NEXT:    [[TMP10:%.*]] = call i64 @llvm.vscale.i64()
 ; STRIDED-NEXT:    [[TMP11:%.*]] = mul i64 [[TMP10]], 4
-; STRIDED-NEXT:    [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; STRIDED-NEXT:    [[TMP12:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; STRIDED-NEXT:    [[TMP13:%.*]] = add <vscale x 4 x i64> [[TMP12]], zeroinitializer
 ; STRIDED-NEXT:    [[TMP14:%.*]] = mul <vscale x 4 x i64> [[TMP13]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; STRIDED-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
@@ -759,7 +759,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
 ; STRIDED-NEXT:    [[TMP18:%.*]] = mul i64 [[TMP15]], 0
 ; STRIDED-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP18]], i64 0
 ; STRIDED-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; STRIDED-NEXT:    [[TMP19:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; STRIDED-NEXT:    [[TMP19:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; STRIDED-NEXT:    [[TMP20:%.*]] = add <vscale x 4 x i64> [[DOTSPLAT]], [[TMP19]]
 ; STRIDED-NEXT:    [[DOTSPLATINSERT9:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[STRIDE]], i64 0
 ; STRIDED-NEXT:    [[DOTSPLAT10:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT9]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
@@ -772,7 +772,7 @@ define void @double_stride_ptr_iv(ptr %p, ptr %p2, i64 %stride) {
 ; STRIDED-NEXT:    [[TMP26:%.*]] = mul i64 [[TMP23]], 0
 ; STRIDED-NEXT:    [[DOTSPLATINSERT13:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[TMP26]], i64 0
 ; STRIDED-NEXT:    [[DOTSPLAT14:%.*]] = shufflevector <vscale x 4 x i64> [[DOTSPLATINSERT13]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; STRIDED-NEXT:    [[TMP27:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; STRIDED-NEXT:    [[TMP27:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; STRIDED-NEXT:    [[TMP28:%.*]] = add <vscale x 4 x i64> [[DOTSPLAT14]], [[TMP27]]
 ; STRIDED-NEXT:    [[VECTOR_GEP17:%.*]] = mul <vscale x 4 x i64> [[TMP28]], [[DOTSPLAT10]]
 ; STRIDED-NEXT:    [[TMP29:%.*]] = getelementptr i8, ptr [[POINTER_PHI11]], <vscale x 4 x i64> [[VECTOR_GEP17]]

diff  --git a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
index 0ecba2f304682c..40213f99d63146 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/uniform-load-store.ll
@@ -326,7 +326,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
 ; SCALABLE-NEXT:    [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
 ; SCALABLE-NEXT:    [[TMP15:%.*]] = call i64 @llvm.vscale.i64()
 ; SCALABLE-NEXT:    [[TMP16:%.*]] = mul i64 [[TMP15]], 2
-; SCALABLE-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; SCALABLE-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; SCALABLE-NEXT:    [[TMP5:%.*]] = add <vscale x 2 x i64> [[TMP4]], zeroinitializer
 ; SCALABLE-NEXT:    [[TMP6:%.*]] = mul <vscale x 2 x i64> [[TMP5]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; SCALABLE-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP6]]
@@ -438,7 +438,7 @@ define void @conditional_uniform_load(ptr noalias nocapture %a, ptr noalias noca
 ; TF-SCALABLE-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; TF-SCALABLE-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
 ; TF-SCALABLE-NEXT:    [[TMP20:%.*]] = mul i64 [[TMP19]], 2
-; TF-SCALABLE-NEXT:    [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; TF-SCALABLE-NEXT:    [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; TF-SCALABLE-NEXT:    [[TMP6:%.*]] = add <vscale x 2 x i64> [[TMP5]], zeroinitializer
 ; TF-SCALABLE-NEXT:    [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; TF-SCALABLE-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
@@ -921,7 +921,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
 ; SCALABLE-NEXT:    br label [[VECTOR_BODY:%.*]]
 ; SCALABLE:       vector.body:
 ; SCALABLE-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; SCALABLE-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; SCALABLE-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; SCALABLE-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[INDEX]], i64 0
 ; SCALABLE-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
 ; SCALABLE-NEXT:    [[TMP5:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP4]]
@@ -1012,7 +1012,7 @@ define void @uniform_store_of_loop_varying(ptr noalias nocapture %a, ptr noalias
 ; TF-SCALABLE-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; TF-SCALABLE-NEXT:    [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
 ; TF-SCALABLE-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP14]], 2
-; TF-SCALABLE-NEXT:    [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; TF-SCALABLE-NEXT:    [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; TF-SCALABLE-NEXT:    [[TMP6:%.*]] = add <vscale x 2 x i64> [[TMP5]], zeroinitializer
 ; TF-SCALABLE-NEXT:    [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; TF-SCALABLE-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]
@@ -1145,7 +1145,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
 ; SCALABLE-NEXT:    [[N_VEC:%.*]] = sub i64 1025, [[N_MOD_VF]]
 ; SCALABLE-NEXT:    [[TMP14:%.*]] = call i64 @llvm.vscale.i64()
 ; SCALABLE-NEXT:    [[TMP15:%.*]] = mul i64 [[TMP14]], 2
-; SCALABLE-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; SCALABLE-NEXT:    [[TMP4:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; SCALABLE-NEXT:    [[TMP5:%.*]] = add <vscale x 2 x i64> [[TMP4]], zeroinitializer
 ; SCALABLE-NEXT:    [[TMP6:%.*]] = mul <vscale x 2 x i64> [[TMP5]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; SCALABLE-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP6]]
@@ -1256,7 +1256,7 @@ define void @conditional_uniform_store(ptr noalias nocapture %a, ptr noalias noc
 ; TF-SCALABLE-NEXT:    [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
 ; TF-SCALABLE-NEXT:    [[TMP19:%.*]] = call i64 @llvm.vscale.i64()
 ; TF-SCALABLE-NEXT:    [[TMP20:%.*]] = mul i64 [[TMP19]], 2
-; TF-SCALABLE-NEXT:    [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; TF-SCALABLE-NEXT:    [[TMP5:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; TF-SCALABLE-NEXT:    [[TMP6:%.*]] = add <vscale x 2 x i64> [[TMP5]], zeroinitializer
 ; TF-SCALABLE-NEXT:    [[TMP7:%.*]] = mul <vscale x 2 x i64> [[TMP6]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
 ; TF-SCALABLE-NEXT:    [[INDUCTION:%.*]] = add <vscale x 2 x i64> zeroinitializer, [[TMP7]]

diff  --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll
index 8bde5ba5f15193..b4cb0d0a098c6d 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-cond-reduction.ll
@@ -275,7 +275,7 @@ define i32 @cond_add_pred(ptr %a, i64 %n, i32 %start) {
 ; IF-EVL-INLOOP-NEXT:    [[TMP13:%.*]] = add i64 [[EVL_BASED_IV]], 0
 ; IF-EVL-INLOOP-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EVL_BASED_IV]], i64 0
 ; IF-EVL-INLOOP-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-INLOOP-NEXT:    [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; IF-EVL-INLOOP-NEXT:    [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; IF-EVL-INLOOP-NEXT:    [[TMP15:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
 ; IF-EVL-INLOOP-NEXT:    [[VEC_IV:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP15]]
 ; IF-EVL-INLOOP-NEXT:    [[TMP16:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]]
@@ -509,7 +509,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
 ; NO-VP-OUTLOOP-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; NO-VP-OUTLOOP-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 4
 ; NO-VP-OUTLOOP-NEXT:    [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
-; NO-VP-OUTLOOP-NEXT:    [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; NO-VP-OUTLOOP-NEXT:    [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; NO-VP-OUTLOOP-NEXT:    [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP12]], zeroinitializer
 ; NO-VP-OUTLOOP-NEXT:    [[TMP14:%.*]] = mul <vscale x 4 x i32> [[TMP13]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
 ; NO-VP-OUTLOOP-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP14]]
@@ -572,7 +572,7 @@ define i32 @step_cond_add(ptr %a, i64 %n, i32 %start) {
 ; NO-VP-INLOOP-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
 ; NO-VP-INLOOP-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; NO-VP-INLOOP-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4
-; NO-VP-INLOOP-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; NO-VP-INLOOP-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; NO-VP-INLOOP-NEXT:    [[TMP7:%.*]] = add <vscale x 4 x i32> [[TMP6]], zeroinitializer
 ; NO-VP-INLOOP-NEXT:    [[TMP8:%.*]] = mul <vscale x 4 x i32> [[TMP7]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
 ; NO-VP-INLOOP-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP8]]
@@ -705,7 +705,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
 ; NO-VP-OUTLOOP-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; NO-VP-OUTLOOP-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 4
 ; NO-VP-OUTLOOP-NEXT:    [[TMP11:%.*]] = insertelement <vscale x 4 x i32> zeroinitializer, i32 [[START]], i32 0
-; NO-VP-OUTLOOP-NEXT:    [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; NO-VP-OUTLOOP-NEXT:    [[TMP12:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; NO-VP-OUTLOOP-NEXT:    [[TMP13:%.*]] = add <vscale x 4 x i32> [[TMP12]], zeroinitializer
 ; NO-VP-OUTLOOP-NEXT:    [[TMP14:%.*]] = mul <vscale x 4 x i32> [[TMP13]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
 ; NO-VP-OUTLOOP-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP14]]
@@ -773,7 +773,7 @@ define i32 @step_cond_add_pred(ptr %a, i64 %n, i32 %start) {
 ; NO-VP-INLOOP-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
 ; NO-VP-INLOOP-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; NO-VP-INLOOP-NEXT:    [[TMP5:%.*]] = mul i64 [[TMP4]], 4
-; NO-VP-INLOOP-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; NO-VP-INLOOP-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; NO-VP-INLOOP-NEXT:    [[TMP7:%.*]] = add <vscale x 4 x i32> [[TMP6]], zeroinitializer
 ; NO-VP-INLOOP-NEXT:    [[TMP8:%.*]] = mul <vscale x 4 x i32> [[TMP7]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
 ; NO-VP-INLOOP-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i32> zeroinitializer, [[TMP8]]

diff  --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll
index 895c89b768acb3..69aa7bc7409837 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-interleave.ll
@@ -28,7 +28,7 @@ define void @interleave(ptr noalias %a, ptr noalias %b, i64 %N) {
 ; IF-EVL-NEXT:    [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[N]], 1
 ; IF-EVL-NEXT:    [[TMP9:%.*]] = call i64 @llvm.vscale.i64()
 ; IF-EVL-NEXT:    [[TMP10:%.*]] = mul i64 [[TMP9]], 8
-; IF-EVL-NEXT:    [[TMP11:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; IF-EVL-NEXT:    [[TMP11:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; IF-EVL-NEXT:    [[TMP12:%.*]] = add <vscale x 4 x i64> [[TMP11]], zeroinitializer
 ; IF-EVL-NEXT:    [[TMP13:%.*]] = mul <vscale x 4 x i64> [[TMP12]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; IF-EVL-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP13]]

diff  --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll
index 9d02ce715139e4..b8b2558247fa64 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-masked-loadstore.ll
@@ -38,7 +38,7 @@ define void @masked_loadstore(ptr noalias %a, ptr noalias %b, i64 %n) {
 ; IF-EVL-NEXT:    [[TMP13:%.*]] = add i64 [[EVL_BASED_IV]], 0
 ; IF-EVL-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EVL_BASED_IV]], i64 0
 ; IF-EVL-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-NEXT:    [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; IF-EVL-NEXT:    [[TMP14:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; IF-EVL-NEXT:    [[TMP15:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP14]]
 ; IF-EVL-NEXT:    [[VEC_IV:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP15]]
 ; IF-EVL-NEXT:    [[TMP16:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT2]]

diff  --git a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll
index d62f70c06a5fbf..c5a89d48f77b0b 100644
--- a/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll
+++ b/llvm/test/Transforms/LoopVectorize/RISCV/vectorize-force-tail-with-evl-reverse-load-store.ll
@@ -139,7 +139,7 @@ define void @reverse_load_store_masked(i64 %startval, ptr noalias %ptr, ptr noal
 ; IF-EVL-NEXT:    [[TMP10:%.*]] = add i32 [[OFFSET_IDX3]], 0
 ; IF-EVL-NEXT:    [[BROADCAST_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i64> poison, i64 [[EVL_BASED_IV]], i64 0
 ; IF-EVL-NEXT:    [[BROADCAST_SPLAT:%.*]] = shufflevector <vscale x 4 x i64> [[BROADCAST_SPLATINSERT]], <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer
-; IF-EVL-NEXT:    [[TMP11:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; IF-EVL-NEXT:    [[TMP11:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; IF-EVL-NEXT:    [[TMP12:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP11]]
 ; IF-EVL-NEXT:    [[VEC_IV:%.*]] = add <vscale x 4 x i64> [[BROADCAST_SPLAT]], [[TMP12]]
 ; IF-EVL-NEXT:    [[TMP13:%.*]] = icmp ule <vscale x 4 x i64> [[VEC_IV]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1023, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)

diff  --git a/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll b/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll
index 08f2b823815b1c..a1d2f6c767b76f 100644
--- a/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll
+++ b/llvm/test/Transforms/LoopVectorize/outer_loop_scalable.ll
@@ -26,7 +26,7 @@ define void @foo() {
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 1024, [[N_MOD_VF]]
 ; CHECK-NEXT:    [[TMP18:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP19:%.*]] = mul i64 [[TMP18]], 4
-; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.experimental.stepvector.nxv4i64()
+; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 4 x i64> @llvm.stepvector.nxv4i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = add <vscale x 4 x i64> [[TMP4]], zeroinitializer
 ; CHECK-NEXT:    [[TMP6:%.*]] = mul <vscale x 4 x i64> [[TMP5]], shufflevector (<vscale x 4 x i64> insertelement (<vscale x 4 x i64> poison, i64 1, i64 0), <vscale x 4 x i64> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = add <vscale x 4 x i64> zeroinitializer, [[TMP6]]

diff  --git a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
index b97ceba8b0116e..34461be1ed5ab3 100644
--- a/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
+++ b/llvm/test/Transforms/LoopVectorize/scalable-inductions.ll
@@ -21,7 +21,7 @@ define void @add_ind64_unrolled(ptr noalias nocapture %a, ptr noalias nocapture
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shl i64 [[TMP4]], 2
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.experimental.stepvector.nxv2i64()
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 2 x i64> @llvm.stepvector.nxv2i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP8:%.*]] = shl i64 [[TMP7]], 1
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[TMP8]], i64 0
@@ -103,7 +103,7 @@ define void @add_ind64_unrolled_nxv1i64(ptr noalias nocapture %a, ptr noalias no
 ; CHECK-NEXT:    [[N_VEC:%.*]] = sub i64 [[N]], [[N_MOD_VF]]
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shl i64 [[TMP4]], 1
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 1 x i64> @llvm.experimental.stepvector.nxv1i64()
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 1 x i64> @llvm.stepvector.nxv1i64()
 ; CHECK-NEXT:    [[TMP7:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 1 x i64> poison, i64 [[TMP7]], i64 0
 ; CHECK-NEXT:    [[DOTSPLAT:%.*]] = shufflevector <vscale x 1 x i64> [[DOTSPLATINSERT]], <vscale x 1 x i64> poison, <vscale x 1 x i32> zeroinitializer
@@ -191,7 +191,7 @@ define void @add_unique_ind32(ptr noalias nocapture %a, i64 %n) {
 ; CHECK-NEXT:    [[IND_END:%.*]] = shl i32 [[DOTCAST]], 1
 ; CHECK-NEXT:    [[TMP4:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP5:%.*]] = shl i64 [[TMP4]], 2
-; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; CHECK-NEXT:    [[TMP7:%.*]] = shl <vscale x 4 x i32> [[TMP6]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[TMP8:%.*]] = call i32 @llvm.vscale.i32()
 ; CHECK-NEXT:    [[TMP9:%.*]] = shl i32 [[TMP8]], 3
@@ -269,7 +269,7 @@ define void @add_unique_indf32(ptr noalias nocapture %a, i64 %n) {
 ; CHECK-NEXT:    [[IND_END:%.*]] = fadd float [[TMP4]], 0.000000e+00
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP6:%.*]] = shl i64 [[TMP5]], 2
-; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; CHECK-NEXT:    [[TMP7:%.*]] = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; CHECK-NEXT:    [[TMP8:%.*]] = uitofp <vscale x 4 x i32> [[TMP7]] to <vscale x 4 x float>
 ; CHECK-NEXT:    [[TMP9:%.*]] = fmul <vscale x 4 x float> [[TMP8]], shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 2.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer)
 ; CHECK-NEXT:    [[INDUCTION:%.*]] = fadd <vscale x 4 x float> [[TMP9]], zeroinitializer

diff  --git a/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll b/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
index dee0af0cc09e05..27d9e6b7b4cb36 100644
--- a/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
+++ b/llvm/test/Transforms/MemCpyOpt/vscale-crashes.ll
@@ -69,7 +69,7 @@ define void @merge_stores_second_scalable(ptr %ptr) {
 define void @callslotoptzn(<vscale x 4 x float> %val, ptr %out) {
 ; CHECK-LABEL: @callslotoptzn(
 ; CHECK-NEXT:    [[ALLOC:%.*]] = alloca <vscale x 4 x float>, align 16
-; CHECK-NEXT:    [[IDX:%.*]] = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+; CHECK-NEXT:    [[IDX:%.*]] = tail call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 ; CHECK-NEXT:    [[STRIDE:%.*]] = getelementptr inbounds float, ptr [[ALLOC]], <vscale x 4 x i32> [[IDX]]
 ; CHECK-NEXT:    call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> [[VAL:%.*]], <vscale x 4 x ptr> [[STRIDE]], i32 4, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer))
 ; CHECK-NEXT:    [[LI:%.*]] = load <vscale x 4 x float>, ptr [[ALLOC]], align 4
@@ -77,7 +77,7 @@ define void @callslotoptzn(<vscale x 4 x float> %val, ptr %out) {
 ; CHECK-NEXT:    ret void
 ;
   %alloc = alloca <vscale x 4 x float>, align 16
-  %idx = tail call <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+  %idx = tail call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
   %stride = getelementptr inbounds float, ptr %alloc, <vscale x 4 x i32> %idx
   call void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> %val, <vscale x 4 x ptr> %stride, i32 4, <vscale x 4 x i1> splat (i1 true))
   %li = load <vscale x 4 x float>, ptr %alloc, align 4
@@ -123,5 +123,5 @@ define void @memmove_agg2(ptr %a, ptr %b) {
   ret void
 }
 
-declare <vscale x 4 x i32> @llvm.experimental.stepvector.nxv4i32()
+declare <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
 declare void @llvm.masked.scatter.nxv4f32.nxv4p0(<vscale x 4 x float> , <vscale x 4 x ptr> , i32, <vscale x 4 x i1>)

diff  --git a/llvm/test/Verifier/stepvector-intrinsic.ll b/llvm/test/Verifier/stepvector-intrinsic.ll
index ac4ad670287f87..42d0ff2c2b87a3 100644
--- a/llvm/test/Verifier/stepvector-intrinsic.ll
+++ b/llvm/test/Verifier/stepvector-intrinsic.ll
@@ -4,26 +4,26 @@
 
 define i32 @stepvector_i32() {
 ; CHECK: Intrinsic has incorrect return type!
-  %1 = call i32 @llvm.experimental.stepvector.i32()
+  %1 = call i32 @llvm.stepvector.i32()
   ret i32 %1
 }
 
 ; Reject vectors with non-integer elements
 
 define <vscale x 4 x float> @stepvector_float() {
-; CHECK: experimental_stepvector only supported for vectors of integers with a bitwidth of at least 8
-  %1 = call <vscale x 4 x float> @llvm.experimental.stepvector.nxv4f32()
+; CHECK: stepvector only supported for vectors of integers with a bitwidth of at least 8
+  %1 = call <vscale x 4 x float> @llvm.stepvector.nxv4f32()
   ret <vscale x 4 x float> %1
 }
 
 ; Reject vectors of integers less than 8 bits in width
 
 define <vscale x 16 x i1> @stepvector_i1() {
-; CHECK: experimental_stepvector only supported for vectors of integers with a bitwidth of at least 8
-  %1 = call <vscale x 16 x i1> @llvm.experimental.stepvector.nxv16i1()
+; CHECK: stepvector only supported for vectors of integers with a bitwidth of at least 8
+  %1 = call <vscale x 16 x i1> @llvm.stepvector.nxv16i1()
   ret <vscale x 16 x i1> %1
 }
 
-declare i32 @llvm.experimental.stepvector.i32()
-declare <vscale x 4 x float> @llvm.experimental.stepvector.nxv4f32()
-declare <vscale x 16 x i1> @llvm.experimental.stepvector.nxv16i1()
+declare i32 @llvm.stepvector.i32()
+declare <vscale x 4 x float> @llvm.stepvector.nxv4f32()
+declare <vscale x 16 x i1> @llvm.stepvector.nxv16i1()

diff  --git a/llvm/unittests/IR/IRBuilderTest.cpp b/llvm/unittests/IR/IRBuilderTest.cpp
index 434cca93ae720a..9a4d0afbb2d1bc 100644
--- a/llvm/unittests/IR/IRBuilderTest.cpp
+++ b/llvm/unittests/IR/IRBuilderTest.cpp
@@ -241,7 +241,7 @@ TEST_F(IRBuilderTest, CreateStepVector) {
   CallInst *Call = cast<CallInst>(StepVec);
   FunctionType *FTy = Call->getFunctionType();
   EXPECT_EQ(FTy->getReturnType(), DstVecTy);
-  EXPECT_EQ(Call->getIntrinsicID(), Intrinsic::experimental_stepvector);
+  EXPECT_EQ(Call->getIntrinsicID(), Intrinsic::stepvector);
 }
 
 TEST_F(IRBuilderTest, CreateStepVectorI3) {
@@ -260,7 +260,7 @@ TEST_F(IRBuilderTest, CreateStepVectorI3) {
   CallInst *Call = cast<CallInst>(Trunc->getOperand(0));
   FunctionType *FTy = Call->getFunctionType();
   EXPECT_EQ(FTy->getReturnType(), VecI8Ty);
-  EXPECT_EQ(Call->getIntrinsicID(), Intrinsic::experimental_stepvector);
+  EXPECT_EQ(Call->getIntrinsicID(), Intrinsic::stepvector);
 }
 
 TEST_F(IRBuilderTest, ConstrainedFP) {

diff  --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index 86983e95fdd33d..3822eb3b3f1f6c 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -998,7 +998,7 @@ def LLVM_vscale : LLVM_IntrOp<"vscale", [0], [], [], 1>;
 
 /// Create a call to stepvector intrinsic.
 def LLVM_StepVectorOp
-    : LLVM_IntrOp<"experimental.stepvector", [0], [], [Pure], 1> {
+    : LLVM_IntrOp<"stepvector", [0], [], [Pure], 1> {
   let arguments = (ins);
   let results = (outs LLVM_VectorOf<AnySignlessInteger>:$res);
   let assemblyFormat = "attr-dict `:` type($res)";

diff  --git a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
index 1abadcc345cd2d..d644088080fdef 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-mask-to-llvm.mlir
@@ -26,7 +26,7 @@ func.func @genbool_var_1d(%arg0: index) -> vector<11xi1> {
 
 // CMP32-LABEL: @genbool_var_1d_scalable(
 // CMP32-SAME: %[[ARG:.*]]: index)
-// CMP32: %[[T0:.*]] = llvm.intr.experimental.stepvector : vector<[11]xi32>
+// CMP32: %[[T0:.*]] = llvm.intr.stepvector : vector<[11]xi32>
 // CMP32: %[[T1:.*]] = arith.index_cast %[[ARG]] : index to i32
 // CMP32: %[[T2:.*]] = llvm.insertelement %[[T1]], %{{.*}}[%{{.*}} : i32] : vector<[11]xi32>
 // CMP32: %[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<[11]xi32>
@@ -35,7 +35,7 @@ func.func @genbool_var_1d(%arg0: index) -> vector<11xi1> {
 
 // CMP64-LABEL: @genbool_var_1d_scalable(
 // CMP64-SAME: %[[ARG:.*]]: index)
-// CMP64: %[[T0:.*]] = llvm.intr.experimental.stepvector : vector<[11]xi64>
+// CMP64: %[[T0:.*]] = llvm.intr.stepvector : vector<[11]xi64>
 // CMP64: %[[T1:.*]] = arith.index_cast %[[ARG]] : index to i64
 // CMP64: %[[T2:.*]] = llvm.insertelement %[[T1]], %{{.*}}[%{{.*}} : i32] : vector<[11]xi64>
 // CMP64: %[[T3:.*]] = llvm.shufflevector %[[T2]], %{{.*}} [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0] : vector<[11]xi64>

diff  --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index e7da21dbfbcdf3..63bcecd863e95d 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -2630,7 +2630,7 @@ func.func @create_mask_1d_scalable(%a : index) -> vector<[4]xi1> {
 
 // CHECK-LABEL: func @create_mask_1d_scalable
 // CHECK-SAME: %[[arg:.*]]: index
-// CHECK:  %[[indices:.*]] = llvm.intr.experimental.stepvector : vector<[4]xi32>
+// CHECK:  %[[indices:.*]] = llvm.intr.stepvector : vector<[4]xi32>
 // CHECK:  %[[arg_i32:.*]] = arith.index_cast %[[arg]] : index to i32
 // CHECK:  %[[boundsInsert:.*]] = llvm.insertelement %[[arg_i32]], {{.*}} : vector<[4]xi32>
 // CHECK:  %[[bounds:.*]] = llvm.shufflevector %[[boundsInsert]], {{.*}} : vector<[4]xi32>
@@ -3254,7 +3254,7 @@ func.func @vector_from_elements_0d(%a: f32) -> vector<f32> {
 // -----
 
 // CHECK-LABEL: @vector_step_scalable
-// CHECK: %[[STEPVECTOR:.*]] = llvm.intr.experimental.stepvector : vector<[4]xi64>
+// CHECK: %[[STEPVECTOR:.*]] = llvm.intr.stepvector : vector<[4]xi64>
 // CHECK: %[[CAST:.*]] = builtin.unrealized_conversion_cast %[[STEPVECTOR]] : vector<[4]xi64> to vector<[4]xindex>
 // CHECK: return %[[CAST]] : vector<[4]xindex>
 func.func @vector_step_scalable() -> vector<[4]xindex> {

diff  --git a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
index d7620b74089925..30df419822994c 100644
--- a/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
+++ b/mlir/test/Conversion/VectorToSCF/vector-to-scf.mlir
@@ -519,7 +519,7 @@ func.func @transfer_write_scalable(%arg0: memref<?xf32, strided<[?], offset: ?>>
   %0 = llvm.mlir.constant(0 : i32) : i32
   %c0 = arith.constant 0 : index
   %dim = memref.dim %arg0, %c0 : memref<?xf32, strided<[?], offset: ?>>
-  %1 = llvm.intr.experimental.stepvector : vector<[16]xi32>
+  %1 = llvm.intr.stepvector : vector<[16]xi32>
   %2 = arith.index_cast %dim : index to i32
   %3 = llvm.mlir.undef : vector<[16]xi32>
   %4 = llvm.insertelement %2, %3[%0 : i32] : vector<[16]xi32>

diff  --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir
index aa5483572dcf94..8e812108c60552 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/outerproduct-f64.mlir
@@ -23,7 +23,7 @@ func.func @test_outerproduct_no_accumulator_2x2xf64() {
   %c0 = arith.constant 0 : index
   %ones = arith.constant dense<1> : vector<[2]xi32>
 
-  %step_vector = llvm.intr.experimental.stepvector : vector<[2]xi32>
+  %step_vector = llvm.intr.stepvector : vector<[2]xi32>
   %vector_i32 = arith.addi %step_vector, %ones : vector<[2]xi32>
   %vector = arith.sitofp %vector_i32 : vector<[2]xi32> to vector<[2]xf64>
 
@@ -53,7 +53,7 @@ func.func @test_outerproduct_with_accumulator_2x2xf64() {
   %f10 = arith.constant 10.0 : f64
 
   %acc = vector.splat %f10 : vector<[2]x[2]xf64>
-  %step_vector = llvm.intr.experimental.stepvector : vector<[2]xi32>
+  %step_vector = llvm.intr.stepvector : vector<[2]xi32>
   %vector_i32 = arith.addi %step_vector, %ones : vector<[2]xi32>
   %vector = arith.sitofp %vector_i32 : vector<[2]xi32> to vector<[2]xf64>
 
@@ -78,7 +78,7 @@ func.func @test_masked_outerproduct_no_accumulator_2x2xf64() {
   %ones = arith.constant dense<1> : vector<[2]xi32>
   %f10 = arith.constant 10.0 : f64
 
-  %step_vector = llvm.intr.experimental.stepvector : vector<[2]xi32>
+  %step_vector = llvm.intr.stepvector : vector<[2]xi32>
   %vector_i32 = arith.addi %step_vector, %ones : vector<[2]xi32>
   %vector = arith.sitofp %vector_i32 : vector<[2]xi32> to vector<[2]xf64>
 
@@ -109,7 +109,7 @@ func.func @test_masked_outerproduct_with_accumulator_2x2xf64() {
   %f10 = arith.constant 10.0 : f64
 
   %acc = vector.splat %f10 : vector<[2]x[2]xf64>
-  %step_vector = llvm.intr.experimental.stepvector : vector<[2]xi32>
+  %step_vector = llvm.intr.stepvector : vector<[2]xi32>
   %vector_i32 = arith.addi %step_vector, %ones : vector<[2]xi32>
   %vector = arith.sitofp %vector_i32 : vector<[2]xi32> to vector<[2]xf64>
 

diff  --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-deinterleave.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-deinterleave.mlir
index c071bb0b17f447..f29740d4f049da 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-deinterleave.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSVE/test-scalable-deinterleave.mlir
@@ -18,7 +18,7 @@ func.func @entry() {
 }
 
 func.func @test_deinterleave() {
-  %step_vector = llvm.intr.experimental.stepvector : vector<[4]xi8>
+  %step_vector = llvm.intr.stepvector : vector<[4]xi8>
   vector.print %step_vector : vector<[4]xi8>
   // CHECK: ( 0, 1, 2, 3, 4, 5, 6, 7 )
   %v1, %v2 = vector.deinterleave %step_vector : vector<[4]xi8> -> vector<[2]xi8>

diff  --git a/mlir/test/Target/LLVMIR/llvmir-invalid.mlir b/mlir/test/Target/LLVMIR/llvmir-invalid.mlir
index 0e2afe6fb004d8..af0981440a1776 100644
--- a/mlir/test/Target/LLVMIR/llvmir-invalid.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir-invalid.mlir
@@ -303,7 +303,7 @@ llvm.func @masked_scatter_intr_wrong_type_scalable(%vec : vector<[7]xf32>, %ptrs
 
 llvm.func @stepvector_intr_wrong_type() -> vector<7xf32> {
   // expected-error @below{{op result #0 must be LLVM dialect-compatible vector of signless integer, but got 'vector<7xf32>'}}
-  %0 = llvm.intr.experimental.stepvector : vector<7xf32>
+  %0 = llvm.intr.stepvector : vector<7xf32>
   llvm.return %0 : vector<7xf32>
 }
 

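The hunks above only swap the intrinsic name; the argument and result types checked by the tests are left exactly as they were. As a minimal, self-contained sketch built from the forms exercised in the MemCpyOpt and Verifier tests in this patch (the wrapper @use_stepvector is a hypothetical caller added purely for illustration, not part of the change), IR written against the new name looks like this:

  declare <vscale x 4 x i32> @llvm.stepvector.nxv4i32()

  define <vscale x 4 x i32> @use_stepvector() {
    ; stepvector returns the lane indices <0, 1, 2, ...> of the scalable result vector.
    %step = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()
    ret <vscale x 4 x i32> %step
  }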
More information about the Mlir-commits mailing list