[llvm] 9b59107 - [RISCV][VP] Mangle pointers in vp.load and vp.store tests

Fraser Cormack via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 2 09:55:25 PDT 2021


Author: Fraser Cormack
Date: 2021-11-02T16:46:32Z
New Revision: 9b591078cf67f2731d9cc5f92605312ce15cfd5d

URL: https://github.com/llvm/llvm-project/commit/9b591078cf67f2731d9cc5f92605312ce15cfd5d
DIFF: https://github.com/llvm/llvm-project/commit/9b591078cf67f2731d9cc5f92605312ce15cfd5d.diff

LOG: [RISCV][VP] Mangle pointers in vp.load and vp.store tests

Although this isn't required, it better matches the suggested syntax from
the ongoing documentation work in D112930.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D112939
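
For reference, the mangled suffix encodes the type of the pointer operand:
"p0" denotes a pointer in address space 0, and the trailing vector type
names the pointee. A minimal sketch of the convention, taken from the first
hunk below:

    ; old, unmangled spelling
    declare <2 x i8> @llvm.vp.load.v2i8(<2 x i8>*, <2 x i1>, i32)
    ; new, mangled spelling: ".p0v2i8" = addrspace(0) pointer to <2 x i8>
    declare <2 x i8> @llvm.vp.load.v2i8.p0v2i8(<2 x i8>*, <2 x i1>, i32)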

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
    llvm/test/CodeGen/RISCV/rvv/vpload.ll
    llvm/test/CodeGen/RISCV/rvv/vpstore.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
index 35332b374dd9..a8348fde944e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
 
-declare <2 x i8> @llvm.vp.load.v2i8(<2 x i8>*, <2 x i1>, i32)
+declare <2 x i8> @llvm.vp.load.v2i8.p0v2i8(<2 x i8>*, <2 x i1>, i32)
 
 define <2 x i8> @vpload_v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2i8:
@@ -12,11 +12,11 @@ define <2 x i8> @vpload_v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <2 x i8> @llvm.vp.load.v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 %evl)
+  %load = call <2 x i8> @llvm.vp.load.v2i8.p0v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 %evl)
   ret <2 x i8> %load
 }
 
-declare <4 x i8> @llvm.vp.load.v4i8(<4 x i8>*, <4 x i1>, i32)
+declare <4 x i8> @llvm.vp.load.v4i8.p0v4i8(<4 x i8>*, <4 x i1>, i32)
 
 define <4 x i8> @vpload_v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4i8:
@@ -24,7 +24,7 @@ define <4 x i8> @vpload_v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <4 x i8> @llvm.vp.load.v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 %evl)
+  %load = call <4 x i8> @llvm.vp.load.v4i8.p0v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 %evl)
   ret <4 x i8> %load
 }
 
@@ -38,11 +38,11 @@ define <4 x i8> @vpload_v4i8_allones_mask(<4 x i8>* %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %a = insertelement <4 x i1> undef, i1 true, i32 0
   %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
-  %load = call <4 x i8> @llvm.vp.load.v4i8(<4 x i8>* %ptr, <4 x i1> %b, i32 %evl)
+  %load = call <4 x i8> @llvm.vp.load.v4i8.p0v4i8(<4 x i8>* %ptr, <4 x i1> %b, i32 %evl)
   ret <4 x i8> %load
 }
 
-declare <8 x i8> @llvm.vp.load.v8i8(<8 x i8>*, <8 x i1>, i32)
+declare <8 x i8> @llvm.vp.load.v8i8.p0v8i8(<8 x i8>*, <8 x i1>, i32)
 
 define <8 x i8> @vpload_v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8i8:
@@ -50,11 +50,11 @@ define <8 x i8> @vpload_v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <8 x i8> @llvm.vp.load.v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 %evl)
+  %load = call <8 x i8> @llvm.vp.load.v8i8.p0v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 %evl)
   ret <8 x i8> %load
 }
 
-declare <2 x i16> @llvm.vp.load.v2i16(<2 x i16>*, <2 x i1>, i32)
+declare <2 x i16> @llvm.vp.load.v2i16.p0v2i16(<2 x i16>*, <2 x i1>, i32)
 
 define <2 x i16> @vpload_v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2i16:
@@ -62,11 +62,11 @@ define <2 x i16> @vpload_v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <2 x i16> @llvm.vp.load.v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 %evl)
+  %load = call <2 x i16> @llvm.vp.load.v2i16.p0v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 %evl)
   ret <2 x i16> %load
 }
 
-declare <4 x i16> @llvm.vp.load.v4i16(<4 x i16>*, <4 x i1>, i32)
+declare <4 x i16> @llvm.vp.load.v4i16.p0v4i16(<4 x i16>*, <4 x i1>, i32)
 
 define <4 x i16> @vpload_v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4i16:
@@ -74,11 +74,11 @@ define <4 x i16> @vpload_v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <4 x i16> @llvm.vp.load.v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 %evl)
+  %load = call <4 x i16> @llvm.vp.load.v4i16.p0v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 %evl)
   ret <4 x i16> %load
 }
 
-declare <8 x i16> @llvm.vp.load.v8i16(<8 x i16>*, <8 x i1>, i32)
+declare <8 x i16> @llvm.vp.load.v8i16.p0v8i16(<8 x i16>*, <8 x i1>, i32)
 
 define <8 x i16> @vpload_v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8i16:
@@ -86,7 +86,7 @@ define <8 x i16> @vpload_v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <8 x i16> @llvm.vp.load.v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 %evl)
+  %load = call <8 x i16> @llvm.vp.load.v8i16.p0v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 %evl)
   ret <8 x i16> %load
 }
 
@@ -100,11 +100,11 @@ define <8 x i16> @vpload_v8i16_allones_mask(<8 x i16>* %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %a = insertelement <8 x i1> undef, i1 true, i32 0
   %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
-  %load = call <8 x i16> @llvm.vp.load.v8i16(<8 x i16>* %ptr, <8 x i1> %b, i32 %evl)
+  %load = call <8 x i16> @llvm.vp.load.v8i16.p0v8i16(<8 x i16>* %ptr, <8 x i1> %b, i32 %evl)
   ret <8 x i16> %load
 }
 
-declare <2 x i32> @llvm.vp.load.v2i32(<2 x i32>*, <2 x i1>, i32)
+declare <2 x i32> @llvm.vp.load.v2i32.p0v2i32(<2 x i32>*, <2 x i1>, i32)
 
 define <2 x i32> @vpload_v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2i32:
@@ -112,11 +112,11 @@ define <2 x i32> @vpload_v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <2 x i32> @llvm.vp.load.v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 %evl)
+  %load = call <2 x i32> @llvm.vp.load.v2i32.p0v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 %evl)
   ret <2 x i32> %load
 }
 
-declare <4 x i32> @llvm.vp.load.v4i32(<4 x i32>*, <4 x i1>, i32)
+declare <4 x i32> @llvm.vp.load.v4i32.p0v4i32(<4 x i32>*, <4 x i1>, i32)
 
 define <4 x i32> @vpload_v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4i32:
@@ -124,11 +124,11 @@ define <4 x i32> @vpload_v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <4 x i32> @llvm.vp.load.v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 %evl)
+  %load = call <4 x i32> @llvm.vp.load.v4i32.p0v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 %evl)
   ret <4 x i32> %load
 }
 
-declare <8 x i32> @llvm.vp.load.v8i32(<8 x i32>*, <8 x i1>, i32)
+declare <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>*, <8 x i1>, i32)
 
 define <8 x i32> @vpload_v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8i32:
@@ -136,7 +136,7 @@ define <8 x i32> @vpload_v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <8 x i32> @llvm.vp.load.v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 %evl)
+  %load = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 %evl)
   ret <8 x i32> %load
 }
 
@@ -150,11 +150,11 @@ define <8 x i32> @vpload_v8i32_allones_mask(<8 x i32>* %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %a = insertelement <8 x i1> undef, i1 true, i32 0
   %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
-  %load = call <8 x i32> @llvm.vp.load.v8i32(<8 x i32>* %ptr, <8 x i1> %b, i32 %evl)
+  %load = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %ptr, <8 x i1> %b, i32 %evl)
   ret <8 x i32> %load
 }
 
-declare <2 x i64> @llvm.vp.load.v2i64(<2 x i64>*, <2 x i1>, i32)
+declare <2 x i64> @llvm.vp.load.v2i64.p0v2i64(<2 x i64>*, <2 x i1>, i32)
 
 define <2 x i64> @vpload_v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2i64:
@@ -162,11 +162,11 @@ define <2 x i64> @vpload_v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <2 x i64> @llvm.vp.load.v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 %evl)
+  %load = call <2 x i64> @llvm.vp.load.v2i64.p0v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 %evl)
   ret <2 x i64> %load
 }
 
-declare <4 x i64> @llvm.vp.load.v4i64(<4 x i64>*, <4 x i1>, i32)
+declare <4 x i64> @llvm.vp.load.v4i64.p0v4i64(<4 x i64>*, <4 x i1>, i32)
 
 define <4 x i64> @vpload_v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4i64:
@@ -174,7 +174,7 @@ define <4 x i64> @vpload_v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <4 x i64> @llvm.vp.load.v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 %evl)
+  %load = call <4 x i64> @llvm.vp.load.v4i64.p0v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 %evl)
   ret <4 x i64> %load
 }
 
@@ -188,11 +188,11 @@ define <4 x i64> @vpload_v4i64_allones_mask(<4 x i64>* %ptr, i32 zeroext %evl) {
 ; CHECK-NEXT:    ret
   %a = insertelement <4 x i1> undef, i1 true, i32 0
   %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
-  %load = call <4 x i64> @llvm.vp.load.v4i64(<4 x i64>* %ptr, <4 x i1> %b, i32 %evl)
+  %load = call <4 x i64> @llvm.vp.load.v4i64.p0v4i64(<4 x i64>* %ptr, <4 x i1> %b, i32 %evl)
   ret <4 x i64> %load
 }
 
-declare <8 x i64> @llvm.vp.load.v8i64(<8 x i64>*, <8 x i1>, i32)
+declare <8 x i64> @llvm.vp.load.v8i64.p0v8i64(<8 x i64>*, <8 x i1>, i32)
 
 define <8 x i64> @vpload_v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8i64:
@@ -200,11 +200,11 @@ define <8 x i64> @vpload_v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <8 x i64> @llvm.vp.load.v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 %evl)
+  %load = call <8 x i64> @llvm.vp.load.v8i64.p0v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 %evl)
   ret <8 x i64> %load
 }
 
-declare <2 x half> @llvm.vp.load.v2f16(<2 x half>*, <2 x i1>, i32)
+declare <2 x half> @llvm.vp.load.v2f16.p0v2f16(<2 x half>*, <2 x i1>, i32)
 
 define <2 x half> @vpload_v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2f16:
@@ -212,7 +212,7 @@ define <2 x half> @vpload_v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <2 x half> @llvm.vp.load.v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 %evl)
+  %load = call <2 x half> @llvm.vp.load.v2f16.p0v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 %evl)
   ret <2 x half> %load
 }
 
@@ -226,11 +226,11 @@ define <2 x half> @vpload_v2f16_allones_mask(<2 x half>* %ptr, i32 zeroext %evl)
 ; CHECK-NEXT:    ret
   %a = insertelement <2 x i1> undef, i1 true, i32 0
   %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
-  %load = call <2 x half> @llvm.vp.load.v2f16(<2 x half>* %ptr, <2 x i1> %b, i32 %evl)
+  %load = call <2 x half> @llvm.vp.load.v2f16.p0v2f16(<2 x half>* %ptr, <2 x i1> %b, i32 %evl)
   ret <2 x half> %load
 }
 
-declare <4 x half> @llvm.vp.load.v4f16(<4 x half>*, <4 x i1>, i32)
+declare <4 x half> @llvm.vp.load.v4f16.p0v4f16(<4 x half>*, <4 x i1>, i32)
 
 define <4 x half> @vpload_v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4f16:
@@ -238,11 +238,11 @@ define <4 x half> @vpload_v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <4 x half> @llvm.vp.load.v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 %evl)
+  %load = call <4 x half> @llvm.vp.load.v4f16.p0v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 %evl)
   ret <4 x half> %load
 }
 
-declare <8 x half> @llvm.vp.load.v8f16(<8 x half>*, <8 x i1>, i32)
+declare <8 x half> @llvm.vp.load.v8f16.p0v8f16(<8 x half>*, <8 x i1>, i32)
 
 define <8 x half> @vpload_v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8f16:
@@ -250,11 +250,11 @@ define <8 x half> @vpload_v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 zeroext %evl)
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <8 x half> @llvm.vp.load.v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 %evl)
+  %load = call <8 x half> @llvm.vp.load.v8f16.p0v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 %evl)
   ret <8 x half> %load
 }
 
-declare <2 x float> @llvm.vp.load.v2f32(<2 x float>*, <2 x i1>, i32)
+declare <2 x float> @llvm.vp.load.v2f32.p0v2f32(<2 x float>*, <2 x i1>, i32)
 
 define <2 x float> @vpload_v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2f32:
@@ -262,11 +262,11 @@ define <2 x float> @vpload_v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <2 x float> @llvm.vp.load.v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 %evl)
+  %load = call <2 x float> @llvm.vp.load.v2f32.p0v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 %evl)
   ret <2 x float> %load
 }
 
-declare <4 x float> @llvm.vp.load.v4f32(<4 x float>*, <4 x i1>, i32)
+declare <4 x float> @llvm.vp.load.v4f32.p0v4f32(<4 x float>*, <4 x i1>, i32)
 
 define <4 x float> @vpload_v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4f32:
@@ -274,11 +274,11 @@ define <4 x float> @vpload_v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <4 x float> @llvm.vp.load.v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 %evl)
+  %load = call <4 x float> @llvm.vp.load.v4f32.p0v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 %evl)
   ret <4 x float> %load
 }
 
-declare <8 x float> @llvm.vp.load.v8f32(<8 x float>*, <8 x i1>, i32)
+declare <8 x float> @llvm.vp.load.v8f32.p0v8f32(<8 x float>*, <8 x i1>, i32)
 
 define <8 x float> @vpload_v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8f32:
@@ -286,7 +286,7 @@ define <8 x float> @vpload_v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 zeroext %ev
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <8 x float> @llvm.vp.load.v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 %evl)
+  %load = call <8 x float> @llvm.vp.load.v8f32.p0v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 %evl)
   ret <8 x float> %load
 }
 
@@ -300,11 +300,11 @@ define <8 x float> @vpload_v8f32_allones_mask(<8 x float>* %ptr, i32 zeroext %ev
 ; CHECK-NEXT:    ret
   %a = insertelement <8 x i1> undef, i1 true, i32 0
   %b = shufflevector <8 x i1> %a, <8 x i1> poison, <8 x i32> zeroinitializer
-  %load = call <8 x float> @llvm.vp.load.v8f32(<8 x float>* %ptr, <8 x i1> %b, i32 %evl)
+  %load = call <8 x float> @llvm.vp.load.v8f32.p0v8f32(<8 x float>* %ptr, <8 x i1> %b, i32 %evl)
   ret <8 x float> %load
 }
 
-declare <2 x double> @llvm.vp.load.v2f64(<2 x double>*, <2 x i1>, i32)
+declare <2 x double> @llvm.vp.load.v2f64.p0v2f64(<2 x double>*, <2 x i1>, i32)
 
 define <2 x double> @vpload_v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v2f64:
@@ -312,11 +312,11 @@ define <2 x double> @vpload_v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <2 x double> @llvm.vp.load.v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 %evl)
+  %load = call <2 x double> @llvm.vp.load.v2f64.p0v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 %evl)
   ret <2 x double> %load
 }
 
-declare <4 x double> @llvm.vp.load.v4f64(<4 x double>*, <4 x i1>, i32)
+declare <4 x double> @llvm.vp.load.v4f64.p0v4f64(<4 x double>*, <4 x i1>, i32)
 
 define <4 x double> @vpload_v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v4f64:
@@ -324,7 +324,7 @@ define <4 x double> @vpload_v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <4 x double> @llvm.vp.load.v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 %evl)
+  %load = call <4 x double> @llvm.vp.load.v4f64.p0v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 %evl)
   ret <4 x double> %load
 }
 
@@ -338,11 +338,11 @@ define <4 x double> @vpload_v4f64_allones_mask(<4 x double>* %ptr, i32 zeroext %
 ; CHECK-NEXT:    ret
   %a = insertelement <4 x i1> undef, i1 true, i32 0
   %b = shufflevector <4 x i1> %a, <4 x i1> poison, <4 x i32> zeroinitializer
-  %load = call <4 x double> @llvm.vp.load.v4f64(<4 x double>* %ptr, <4 x i1> %b, i32 %evl)
+  %load = call <4 x double> @llvm.vp.load.v4f64.p0v4f64(<4 x double>* %ptr, <4 x i1> %b, i32 %evl)
   ret <4 x double> %load
 }
 
-declare <8 x double> @llvm.vp.load.v8f64(<8 x double>*, <8 x i1>, i32)
+declare <8 x double> @llvm.vp.load.v8f64.p0v8f64(<8 x double>*, <8 x i1>, i32)
 
 define <8 x double> @vpload_v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_v8f64:
@@ -350,6 +350,6 @@ define <8 x double> @vpload_v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 zeroext %
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <8 x double> @llvm.vp.load.v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 %evl)
+  %load = call <8 x double> @llvm.vp.load.v8f64.p0v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 %evl)
   ret <8 x double> %load
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
index 28e2dc89df11..8a3fb38c8206 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v -riscv-v-vector-bits-min=128 \
 ; RUN:   -verify-machineinstrs < %s | FileCheck %s
 
-declare void @llvm.vp.store.v2i8(<2 x i8>, <2 x i8>*, <2 x i1>, i32)
+declare void @llvm.vp.store.v2i8.p0v2i8(<2 x i8>, <2 x i8>*, <2 x i1>, i32)
 
 define void @vpstore_v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v2i8:
@@ -12,11 +12,11 @@ define void @vpstore_v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v2i8.p0v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v4i8(<4 x i8>, <4 x i8>*, <4 x i1>, i32)
+declare void @llvm.vp.store.v4i8.p0v4i8(<4 x i8>, <4 x i8>*, <4 x i1>, i32)
 
 define void @vpstore_v4i8(<4 x i8> %val, <4 x i8>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v4i8:
@@ -24,11 +24,11 @@ define void @vpstore_v4i8(<4 x i8> %val, <4 x i8>* %ptr, <4 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v4i8(<4 x i8> %val, <4 x i8>* %ptr, <4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v4i8.p0v4i8(<4 x i8> %val, <4 x i8>* %ptr, <4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v8i8(<8 x i8>, <8 x i8>*, <8 x i1>, i32)
+declare void @llvm.vp.store.v8i8.p0v8i8(<8 x i8>, <8 x i8>*, <8 x i1>, i32)
 
 define void @vpstore_v8i8(<8 x i8> %val, <8 x i8>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v8i8:
@@ -36,11 +36,11 @@ define void @vpstore_v8i8(<8 x i8> %val, <8 x i8>* %ptr, <8 x i1> %m, i32 zeroex
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v8i8(<8 x i8> %val, <8 x i8>* %ptr, <8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v8i8.p0v8i8(<8 x i8> %val, <8 x i8>* %ptr, <8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v2i16(<2 x i16>, <2 x i16>*, <2 x i1>, i32)
+declare void @llvm.vp.store.v2i16.p0v2i16(<2 x i16>, <2 x i16>*, <2 x i1>, i32)
 
 define void @vpstore_v2i16(<2 x i16> %val, <2 x i16>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v2i16:
@@ -48,11 +48,11 @@ define void @vpstore_v2i16(<2 x i16> %val, <2 x i16>* %ptr, <2 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v2i16(<2 x i16> %val, <2 x i16>* %ptr, <2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v2i16.p0v2i16(<2 x i16> %val, <2 x i16>* %ptr, <2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v4i16(<4 x i16>, <4 x i16>*, <4 x i1>, i32)
+declare void @llvm.vp.store.v4i16.p0v4i16(<4 x i16>, <4 x i16>*, <4 x i1>, i32)
 
 define void @vpstore_v4i16(<4 x i16> %val, <4 x i16>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v4i16:
@@ -60,11 +60,11 @@ define void @vpstore_v4i16(<4 x i16> %val, <4 x i16>* %ptr, <4 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v4i16(<4 x i16> %val, <4 x i16>* %ptr, <4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v4i16.p0v4i16(<4 x i16> %val, <4 x i16>* %ptr, <4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v8i16(<8 x i16>, <8 x i16>*, <8 x i1>, i32)
+declare void @llvm.vp.store.v8i16.p0v8i16(<8 x i16>, <8 x i16>*, <8 x i1>, i32)
 
 define void @vpstore_v8i16(<8 x i16> %val, <8 x i16>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v8i16:
@@ -72,11 +72,11 @@ define void @vpstore_v8i16(<8 x i16> %val, <8 x i16>* %ptr, <8 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v8i16(<8 x i16> %val, <8 x i16>* %ptr, <8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v8i16.p0v8i16(<8 x i16> %val, <8 x i16>* %ptr, <8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v2i32(<2 x i32>, <2 x i32>*, <2 x i1>, i32)
+declare void @llvm.vp.store.v2i32.p0v2i32(<2 x i32>, <2 x i32>*, <2 x i1>, i32)
 
 define void @vpstore_v2i32(<2 x i32> %val, <2 x i32>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v2i32:
@@ -84,11 +84,11 @@ define void @vpstore_v2i32(<2 x i32> %val, <2 x i32>* %ptr, <2 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v2i32(<2 x i32> %val, <2 x i32>* %ptr, <2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v2i32.p0v2i32(<2 x i32> %val, <2 x i32>* %ptr, <2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v4i32(<4 x i32>, <4 x i32>*, <4 x i1>, i32)
+declare void @llvm.vp.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, <4 x i1>, i32)
 
 define void @vpstore_v4i32(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v4i32:
@@ -96,11 +96,11 @@ define void @vpstore_v4i32(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v4i32(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v4i32.p0v4i32(<4 x i32> %val, <4 x i32>* %ptr, <4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v8i32(<8 x i32>, <8 x i32>*, <8 x i1>, i32)
+declare void @llvm.vp.store.v8i32.p0v8i32(<8 x i32>, <8 x i32>*, <8 x i1>, i32)
 
 define void @vpstore_v8i32(<8 x i32> %val, <8 x i32>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v8i32:
@@ -108,11 +108,11 @@ define void @vpstore_v8i32(<8 x i32> %val, <8 x i32>* %ptr, <8 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v8i32(<8 x i32> %val, <8 x i32>* %ptr, <8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v8i32.p0v8i32(<8 x i32> %val, <8 x i32>* %ptr, <8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v2i64(<2 x i64>, <2 x i64>*, <2 x i1>, i32)
+declare void @llvm.vp.store.v2i64.p0v2i64(<2 x i64>, <2 x i64>*, <2 x i1>, i32)
 
 define void @vpstore_v2i64(<2 x i64> %val, <2 x i64>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v2i64:
@@ -120,11 +120,11 @@ define void @vpstore_v2i64(<2 x i64> %val, <2 x i64>* %ptr, <2 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v2i64(<2 x i64> %val, <2 x i64>* %ptr, <2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v2i64.p0v2i64(<2 x i64> %val, <2 x i64>* %ptr, <2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v4i64(<4 x i64>, <4 x i64>*, <4 x i1>, i32)
+declare void @llvm.vp.store.v4i64.p0v4i64(<4 x i64>, <4 x i64>*, <4 x i1>, i32)
 
 define void @vpstore_v4i64(<4 x i64> %val, <4 x i64>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v4i64:
@@ -132,11 +132,11 @@ define void @vpstore_v4i64(<4 x i64> %val, <4 x i64>* %ptr, <4 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v4i64(<4 x i64> %val, <4 x i64>* %ptr, <4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v4i64.p0v4i64(<4 x i64> %val, <4 x i64>* %ptr, <4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v8i64(<8 x i64>, <8 x i64>*, <8 x i1>, i32)
+declare void @llvm.vp.store.v8i64.p0v8i64(<8 x i64>, <8 x i64>*, <8 x i1>, i32)
 
 define void @vpstore_v8i64(<8 x i64> %val, <8 x i64>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v8i64:
@@ -144,11 +144,11 @@ define void @vpstore_v8i64(<8 x i64> %val, <8 x i64>* %ptr, <8 x i1> %m, i32 zer
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v8i64(<8 x i64> %val, <8 x i64>* %ptr, <8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v8i64.p0v8i64(<8 x i64> %val, <8 x i64>* %ptr, <8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v2f16(<2 x half>, <2 x half>*, <2 x i1>, i32)
+declare void @llvm.vp.store.v2f16.p0v2f16(<2 x half>, <2 x half>*, <2 x i1>, i32)
 
 define void @vpstore_v2f16(<2 x half> %val, <2 x half>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v2f16:
@@ -156,11 +156,11 @@ define void @vpstore_v2f16(<2 x half> %val, <2 x half>* %ptr, <2 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v2f16(<2 x half> %val, <2 x half>* %ptr, <2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v2f16.p0v2f16(<2 x half> %val, <2 x half>* %ptr, <2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v4f16(<4 x half>, <4 x half>*, <4 x i1>, i32)
+declare void @llvm.vp.store.v4f16.p0v4f16(<4 x half>, <4 x half>*, <4 x i1>, i32)
 
 define void @vpstore_v4f16(<4 x half> %val, <4 x half>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v4f16:
@@ -168,11 +168,11 @@ define void @vpstore_v4f16(<4 x half> %val, <4 x half>* %ptr, <4 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v4f16(<4 x half> %val, <4 x half>* %ptr, <4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v4f16.p0v4f16(<4 x half> %val, <4 x half>* %ptr, <4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v8f16(<8 x half>, <8 x half>*, <8 x i1>, i32)
+declare void @llvm.vp.store.v8f16.p0v8f16(<8 x half>, <8 x half>*, <8 x i1>, i32)
 
 define void @vpstore_v8f16(<8 x half> %val, <8 x half>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v8f16:
@@ -180,11 +180,11 @@ define void @vpstore_v8f16(<8 x half> %val, <8 x half>* %ptr, <8 x i1> %m, i32 z
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v8f16(<8 x half> %val, <8 x half>* %ptr, <8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v8f16.p0v8f16(<8 x half> %val, <8 x half>* %ptr, <8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v2f32(<2 x float>, <2 x float>*, <2 x i1>, i32)
+declare void @llvm.vp.store.v2f32.p0v2f32(<2 x float>, <2 x float>*, <2 x i1>, i32)
 
 define void @vpstore_v2f32(<2 x float> %val, <2 x float>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v2f32:
@@ -192,11 +192,11 @@ define void @vpstore_v2f32(<2 x float> %val, <2 x float>* %ptr, <2 x i1> %m, i32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v2f32(<2 x float> %val, <2 x float>* %ptr, <2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v2f32.p0v2f32(<2 x float> %val, <2 x float>* %ptr, <2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v4f32(<4 x float>, <4 x float>*, <4 x i1>, i32)
+declare void @llvm.vp.store.v4f32.p0v4f32(<4 x float>, <4 x float>*, <4 x i1>, i32)
 
 define void @vpstore_v4f32(<4 x float> %val, <4 x float>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v4f32:
@@ -204,11 +204,11 @@ define void @vpstore_v4f32(<4 x float> %val, <4 x float>* %ptr, <4 x i1> %m, i32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v4f32(<4 x float> %val, <4 x float>* %ptr, <4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %ptr, <4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v8f32(<8 x float>, <8 x float>*, <8 x i1>, i32)
+declare void @llvm.vp.store.v8f32.p0v8f32(<8 x float>, <8 x float>*, <8 x i1>, i32)
 
 define void @vpstore_v8f32(<8 x float> %val, <8 x float>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v8f32:
@@ -216,11 +216,11 @@ define void @vpstore_v8f32(<8 x float> %val, <8 x float>* %ptr, <8 x i1> %m, i32
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v8f32(<8 x float> %val, <8 x float>* %ptr, <8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v8f32.p0v8f32(<8 x float> %val, <8 x float>* %ptr, <8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v2f64(<2 x double>, <2 x double>*, <2 x i1>, i32)
+declare void @llvm.vp.store.v2f64.p0v2f64(<2 x double>, <2 x double>*, <2 x i1>, i32)
 
 define void @vpstore_v2f64(<2 x double> %val, <2 x double>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v2f64:
@@ -228,11 +228,11 @@ define void @vpstore_v2f64(<2 x double> %val, <2 x double>* %ptr, <2 x i1> %m, i
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v2f64(<2 x double> %val, <2 x double>* %ptr, <2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v2f64.p0v2f64(<2 x double> %val, <2 x double>* %ptr, <2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v4f64(<4 x double>, <4 x double>*, <4 x i1>, i32)
+declare void @llvm.vp.store.v4f64.p0v4f64(<4 x double>, <4 x double>*, <4 x i1>, i32)
 
 define void @vpstore_v4f64(<4 x double> %val, <4 x double>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v4f64:
@@ -240,11 +240,11 @@ define void @vpstore_v4f64(<4 x double> %val, <4 x double>* %ptr, <4 x i1> %m, i
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v4f64(<4 x double> %val, <4 x double>* %ptr, <4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v4f64.p0v4f64(<4 x double> %val, <4 x double>* %ptr, <4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.v8f64(<8 x double>, <8 x double>*, <8 x i1>, i32)
+declare void @llvm.vp.store.v8f64.p0v8f64(<8 x double>, <8 x double>*, <8 x i1>, i32)
 
 define void @vpstore_v8f64(<8 x double> %val, <8 x double>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_v8f64:
@@ -252,7 +252,7 @@ define void @vpstore_v8f64(<8 x double> %val, <8 x double>* %ptr, <8 x i1> %m, i
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.v8f64(<8 x double> %val, <8 x double>* %ptr, <8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.v8f64.p0v8f64(<8 x double> %val, <8 x double>* %ptr, <8 x i1> %m, i32 %evl)
   ret void
 }
 
@@ -264,6 +264,6 @@ define void @vpstore_v2i8_allones_mask(<2 x i8> %val, <2 x i8>* %ptr, i32 zeroex
 ; CHECK-NEXT:    ret
   %a = insertelement <2 x i1> undef, i1 true, i32 0
   %b = shufflevector <2 x i1> %a, <2 x i1> poison, <2 x i32> zeroinitializer
-  call void @llvm.vp.store.v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %b, i32 %evl)
+  call void @llvm.vp.store.v2i8.p0v2i8(<2 x i8> %val, <2 x i8>* %ptr, <2 x i1> %b, i32 %evl)
   ret void
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index e9a0c4431681..3fef4f9ac32e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-declare <vscale x 1 x i8> @llvm.vp.load.nxv1i8(<vscale x 1 x i8>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0nxv1i8(<vscale x 1 x i8>*, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i8> @vpload_nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i8:
@@ -12,7 +12,7 @@ define <vscale x 1 x i8> @vpload_nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i8> %load
 }
 
@@ -26,11 +26,11 @@ define <vscale x 1 x i8> @vpload_nxv1i8_allones_mask(<vscale x 1 x i8>* %ptr, i3
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
   %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  %load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
   ret <vscale x 1 x i8> %load
 }
 
-declare <vscale x 2 x i8> @llvm.vp.load.nxv2i8(<vscale x 2 x i8>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0nxv2i8(<vscale x 2 x i8>*, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i8> @vpload_nxv2i8(<vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2i8:
@@ -38,11 +38,11 @@ define <vscale x 2 x i8> @vpload_nxv2i8(<vscale x 2 x i8>* %ptr, <vscale x 2 x i
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i8> @llvm.vp.load.nxv2i8(<vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0nxv2i8(<vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i8> %load
 }
 
-declare <vscale x 4 x i8> @llvm.vp.load.nxv4i8(<vscale x 4 x i8>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0nxv4i8(<vscale x 4 x i8>*, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vpload_nxv4i8(<vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4i8:
@@ -50,11 +50,11 @@ define <vscale x 4 x i8> @vpload_nxv4i8(<vscale x 4 x i8>* %ptr, <vscale x 4 x i
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8(<vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0nxv4i8(<vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i8> %load
 }
 
-declare <vscale x 8 x i8> @llvm.vp.load.nxv8i8(<vscale x 8 x i8>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0nxv8i8(<vscale x 8 x i8>*, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i8> @vpload_nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8i8:
@@ -62,7 +62,7 @@ define <vscale x 8 x i8> @vpload_nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vle8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i8> %load
 }
 
@@ -76,11 +76,11 @@ define <vscale x 8 x i8> @vpload_nxv8i8_allones_mask(<vscale x 8 x i8>* %ptr, i3
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
   %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %b, i32 %evl)
+  %load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %b, i32 %evl)
   ret <vscale x 8 x i8> %load
 }
 
-declare <vscale x 1 x i16> @llvm.vp.load.nxv1i16(<vscale x 1 x i16>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0nxv1i16(<vscale x 1 x i16>*, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i16> @vpload_nxv1i16(<vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i16:
@@ -88,11 +88,11 @@ define <vscale x 1 x i16> @vpload_nxv1i16(<vscale x 1 x i16>* %ptr, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i16> @llvm.vp.load.nxv1i16(<vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0nxv1i16(<vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i16> %load
 }
 
-declare <vscale x 2 x i16> @llvm.vp.load.nxv2i16(<vscale x 2 x i16>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0nxv2i16(<vscale x 2 x i16>*, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i16> @vpload_nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2i16:
@@ -100,7 +100,7 @@ define <vscale x 2 x i16> @vpload_nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i16> %load
 }
 
@@ -114,11 +114,11 @@ define <vscale x 2 x i16> @vpload_nxv2i16_allones_mask(<vscale x 2 x i16>* %ptr,
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
   %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %b, i32 %evl)
+  %load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %b, i32 %evl)
   ret <vscale x 2 x i16> %load
 }
 
-declare <vscale x 4 x i16> @llvm.vp.load.nxv4i16(<vscale x 4 x i16>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0nxv4i16(<vscale x 4 x i16>*, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i16> @vpload_nxv4i16(<vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4i16:
@@ -126,11 +126,11 @@ define <vscale x 4 x i16> @vpload_nxv4i16(<vscale x 4 x i16>* %ptr, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16(<vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0nxv4i16(<vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i16> %load
 }
 
-declare <vscale x 8 x i16> @llvm.vp.load.nxv8i16(<vscale x 8 x i16>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>*, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i16> @vpload_nxv8i16(<vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8i16:
@@ -138,11 +138,11 @@ define <vscale x 8 x i16> @vpload_nxv8i16(<vscale x 8 x i16>* %ptr, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16(<vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i16> %load
 }
 
-declare <vscale x 1 x i32> @llvm.vp.load.nxv1i32(<vscale x 1 x i32>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0nxv1i32(<vscale x 1 x i32>*, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i32> @vpload_nxv1i32(<vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i32:
@@ -150,11 +150,11 @@ define <vscale x 1 x i32> @vpload_nxv1i32(<vscale x 1 x i32>* %ptr, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32(<vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0nxv1i32(<vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i32> %load
 }
 
-declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32(<vscale x 2 x i32>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32>*, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i32> @vpload_nxv2i32(<vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2i32:
@@ -162,11 +162,11 @@ define <vscale x 2 x i32> @vpload_nxv2i32(<vscale x 2 x i32>* %ptr, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32(<vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i32> %load
 }
 
-declare <vscale x 4 x i32> @llvm.vp.load.nxv4i32(<vscale x 4 x i32>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>*, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i32> @vpload_nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4i32:
@@ -174,7 +174,7 @@ define <vscale x 4 x i32> @vpload_nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i32> %load
 }
 
@@ -188,11 +188,11 @@ define <vscale x 4 x i32> @vpload_nxv4i32_allones_mask(<vscale x 4 x i32>* %ptr,
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
   %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %b, i32 %evl)
+  %load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %b, i32 %evl)
   ret <vscale x 4 x i32> %load
 }
 
-declare <vscale x 8 x i32> @llvm.vp.load.nxv8i32(<vscale x 8 x i32>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0nxv8i32(<vscale x 8 x i32>*, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i32> @vpload_nxv8i32(<vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8i32:
@@ -200,11 +200,11 @@ define <vscale x 8 x i32> @vpload_nxv8i32(<vscale x 8 x i32>* %ptr, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i32> @llvm.vp.load.nxv8i32(<vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0nxv8i32(<vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i32> %load
 }
 
-declare <vscale x 1 x i64> @llvm.vp.load.nxv1i64(<vscale x 1 x i64>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0nxv1i64(<vscale x 1 x i64>*, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x i64> @vpload_nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1i64:
@@ -212,7 +212,7 @@ define <vscale x 1 x i64> @vpload_nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x i64> %load
 }
 
@@ -226,11 +226,11 @@ define <vscale x 1 x i64> @vpload_nxv1i64_allones_mask(<vscale x 1 x i64>* %ptr,
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
   %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  %load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
   ret <vscale x 1 x i64> %load
 }
 
-declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64(<vscale x 2 x i64>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>*, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i64> @vpload_nxv2i64(<vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2i64:
@@ -238,11 +238,11 @@ define <vscale x 2 x i64> @vpload_nxv2i64(<vscale x 2 x i64>* %ptr, <vscale x 2
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64(<vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x i64> %load
 }
 
-declare <vscale x 4 x i64> @llvm.vp.load.nxv4i64(<vscale x 4 x i64>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0nxv4i64(<vscale x 4 x i64>*, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i64> @vpload_nxv4i64(<vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4i64:
@@ -250,11 +250,11 @@ define <vscale x 4 x i64> @vpload_nxv4i64(<vscale x 4 x i64>* %ptr, <vscale x 4
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64(<vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0nxv4i64(<vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x i64> %load
 }
 
-declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64(<vscale x 8 x i64>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>*, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i64> @vpload_nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8i64:
@@ -262,11 +262,11 @@ define <vscale x 8 x i64> @vpload_nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x i64> %load
 }
 
-declare <vscale x 1 x half> @llvm.vp.load.nxv1f16(<vscale x 1 x half>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0nxv1f16(<vscale x 1 x half>*, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x half> @vpload_nxv1f16(<vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1f16:
@@ -274,11 +274,11 @@ define <vscale x 1 x half> @vpload_nxv1f16(<vscale x 1 x half>* %ptr, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x half> @llvm.vp.load.nxv1f16(<vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0nxv1f16(<vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x half> %load
 }
 
-declare <vscale x 2 x half> @llvm.vp.load.nxv2f16(<vscale x 2 x half>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0nxv2f16(<vscale x 2 x half>*, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x half> @vpload_nxv2f16(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2f16:
@@ -286,7 +286,7 @@ define <vscale x 2 x half> @vpload_nxv2f16(<vscale x 2 x half>* %ptr, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0nxv2f16(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x half> %load
 }
 
@@ -300,11 +300,11 @@ define <vscale x 2 x half> @vpload_nxv2f16_allones_mask(<vscale x 2 x half>* %pt
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 2 x i1> undef, i1 true, i32 0
   %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
-  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %b, i32 %evl)
+  %load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0nxv2f16(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %b, i32 %evl)
   ret <vscale x 2 x half> %load
 }
 
-declare <vscale x 4 x half> @llvm.vp.load.nxv4f16(<vscale x 4 x half>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0nxv4f16(<vscale x 4 x half>*, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x half> @vpload_nxv4f16(<vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4f16:
@@ -312,11 +312,11 @@ define <vscale x 4 x half> @vpload_nxv4f16(<vscale x 4 x half>* %ptr, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x half> @llvm.vp.load.nxv4f16(<vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0nxv4f16(<vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x half> %load
 }
 
-declare <vscale x 8 x half> @llvm.vp.load.nxv8f16(<vscale x 8 x half>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>*, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x half> @vpload_nxv8f16(<vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8f16:
@@ -324,11 +324,11 @@ define <vscale x 8 x half> @vpload_nxv8f16(<vscale x 8 x half>* %ptr, <vscale x
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vle16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x half> @llvm.vp.load.nxv8f16(<vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x half> %load
 }
 
-declare <vscale x 1 x float> @llvm.vp.load.nxv1f32(<vscale x 1 x float>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0nxv1f32(<vscale x 1 x float>*, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x float> @vpload_nxv1f32(<vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1f32:
@@ -336,11 +336,11 @@ define <vscale x 1 x float> @vpload_nxv1f32(<vscale x 1 x float>* %ptr, <vscale
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x float> @llvm.vp.load.nxv1f32(<vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0nxv1f32(<vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x float> %load
 }
 
-declare <vscale x 2 x float> @llvm.vp.load.nxv2f32(<vscale x 2 x float>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0nxv2f32(<vscale x 2 x float>*, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x float> @vpload_nxv2f32(<vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2f32:
@@ -348,11 +348,11 @@ define <vscale x 2 x float> @vpload_nxv2f32(<vscale x 2 x float>* %ptr, <vscale
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32(<vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0nxv2f32(<vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x float> %load
 }
 
-declare <vscale x 4 x float> @llvm.vp.load.nxv4f32(<vscale x 4 x float>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>*, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x float> @vpload_nxv4f32(<vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4f32:
@@ -360,11 +360,11 @@ define <vscale x 4 x float> @vpload_nxv4f32(<vscale x 4 x float>* %ptr, <vscale
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x float> @llvm.vp.load.nxv4f32(<vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x float> %load
 }
 
-declare <vscale x 8 x float> @llvm.vp.load.nxv8f32(<vscale x 8 x float>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0nxv8f32(<vscale x 8 x float>*, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x float> @vpload_nxv8f32(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8f32:
@@ -372,7 +372,7 @@ define <vscale x 8 x float> @vpload_nxv8f32(<vscale x 8 x float>* %ptr, <vscale
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vle32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0nxv8f32(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x float> %load
 }
 
@@ -386,11 +386,11 @@ define <vscale x 8 x float> @vpload_nxv8f32_allones_mask(<vscale x 8 x float>* %
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 8 x i1> undef, i1 true, i32 0
   %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
-  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %b, i32 %evl)
+  %load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0nxv8f32(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %b, i32 %evl)
   ret <vscale x 8 x float> %load
 }
 
-declare <vscale x 1 x double> @llvm.vp.load.nxv1f64(<vscale x 1 x double>*, <vscale x 1 x i1>, i32)
+declare <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0nxv1f64(<vscale x 1 x double>*, <vscale x 1 x i1>, i32)
 
 define <vscale x 1 x double> @vpload_nxv1f64(<vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv1f64:
@@ -398,11 +398,11 @@ define <vscale x 1 x double> @vpload_nxv1f64(<vscale x 1 x double>* %ptr, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 1 x double> @llvm.vp.load.nxv1f64(<vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  %load = call <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0nxv1f64(<vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret <vscale x 1 x double> %load
 }
 
-declare <vscale x 2 x double> @llvm.vp.load.nxv2f64(<vscale x 2 x double>*, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>*, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x double> @vpload_nxv2f64(<vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv2f64:
@@ -410,11 +410,11 @@ define <vscale x 2 x double> @vpload_nxv2f64(<vscale x 2 x double>* %ptr, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 2 x double> @llvm.vp.load.nxv2f64(<vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  %load = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret <vscale x 2 x double> %load
 }
 
-declare <vscale x 4 x double> @llvm.vp.load.nxv4f64(<vscale x 4 x double>*, <vscale x 4 x i1>, i32)
+declare <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0nxv4f64(<vscale x 4 x double>*, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x double> @vpload_nxv4f64(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv4f64:
@@ -422,7 +422,7 @@ define <vscale x 4 x double> @vpload_nxv4f64(<vscale x 4 x double>* %ptr, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0nxv4f64(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret <vscale x 4 x double> %load
 }
 
@@ -436,11 +436,11 @@ define <vscale x 4 x double> @vpload_nxv4f64_allones_mask(<vscale x 4 x double>*
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 4 x i1> undef, i1 true, i32 0
   %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
-  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %b, i32 %evl)
+  %load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0nxv4f64(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %b, i32 %evl)
   ret <vscale x 4 x double> %load
 }
 
-declare <vscale x 8 x double> @llvm.vp.load.nxv8f64(<vscale x 8 x double>*, <vscale x 8 x i1>, i32)
+declare <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0nxv8f64(<vscale x 8 x double>*, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vpload_nxv8f64(<vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpload_nxv8f64:
@@ -448,6 +448,6 @@ define <vscale x 8 x double> @vpload_nxv8f64(<vscale x 8 x double>* %ptr, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vle64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  %load = call <vscale x 8 x double> @llvm.vp.load.nxv8f64(<vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  %load = call <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0nxv8f64(<vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret <vscale x 8 x double> %load
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index e9fdb943fc9e..3eba76f51c24 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -4,7 +4,7 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zfh,+experimental-v \
 ; RUN:     -verify-machineinstrs < %s | FileCheck %s
 
-declare void @llvm.vp.store.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1i8.p0nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>*, <vscale x 1 x i1>, i32)
 
 define void @vpstore_nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1i8:
@@ -12,11 +12,11 @@ define void @vpstore_nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vs
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1i8.p0nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2i8.p0nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, <vscale x 2 x i1>, i32)
 
 define void @vpstore_nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2i8:
@@ -24,11 +24,11 @@ define void @vpstore_nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %ptr, <vs
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2i8.p0nxv2i8(<vscale x 2 x i8> %val, <vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4i8.p0nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>*, <vscale x 4 x i1>, i32)
 
 define void @vpstore_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4i8:
@@ -36,11 +36,11 @@ define void @vpstore_nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %ptr, <vs
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4i8.p0nxv4i8(<vscale x 4 x i8> %val, <vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8i8.p0nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>*, <vscale x 8 x i1>, i32)
 
 define void @vpstore_nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8i8:
@@ -48,11 +48,11 @@ define void @vpstore_nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %ptr, <vs
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vse8.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8i8.p0nxv8i8(<vscale x 8 x i8> %val, <vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1i16.p0nxv1i16(<vscale x 1 x i16>, <vscale x 1 x i16>*, <vscale x 1 x i1>, i32)
 
 define void @vpstore_nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1i16:
@@ -60,11 +60,11 @@ define void @vpstore_nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1i16.p0nxv1i16(<vscale x 1 x i16> %val, <vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2i16.p0nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>*, <vscale x 2 x i1>, i32)
 
 define void @vpstore_nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2i16:
@@ -72,11 +72,11 @@ define void @vpstore_nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2i16.p0nxv2i16(<vscale x 2 x i16> %val, <vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4i16.p0nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>*, <vscale x 4 x i1>, i32)
 
 define void @vpstore_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4i16:
@@ -84,11 +84,11 @@ define void @vpstore_nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4i16.p0nxv4i16(<vscale x 4 x i16> %val, <vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>*, <vscale x 8 x i1>, i32)
 
 define void @vpstore_nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8i16:
@@ -96,11 +96,11 @@ define void @vpstore_nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8i16.p0nxv8i16(<vscale x 8 x i16> %val, <vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1i32.p0nxv1i32(<vscale x 1 x i32>, <vscale x 1 x i32>*, <vscale x 1 x i1>, i32)
 
 define void @vpstore_nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1i32:
@@ -108,11 +108,11 @@ define void @vpstore_nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1i32.p0nxv1i32(<vscale x 1 x i32> %val, <vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2i32.p0nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>*, <vscale x 2 x i1>, i32)
 
 define void @vpstore_nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2i32:
@@ -120,11 +120,11 @@ define void @vpstore_nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2i32.p0nxv2i32(<vscale x 2 x i32> %val, <vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>*, <vscale x 4 x i1>, i32)
 
 define void @vpstore_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4i32:
@@ -132,11 +132,11 @@ define void @vpstore_nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4i32.p0nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8i32.p0nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>*, <vscale x 8 x i1>, i32)
 
 define void @vpstore_nxv8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8i32:
@@ -144,11 +144,11 @@ define void @vpstore_nxv8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8i32.p0nxv8i32(<vscale x 8 x i32> %val, <vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1i64.p0nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, i32)
 
 define void @vpstore_nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1i64:
@@ -156,11 +156,11 @@ define void @vpstore_nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1i64.p0nxv1i64(<vscale x 1 x i64> %val, <vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2i64.p0nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>*, <vscale x 2 x i1>, i32)
 
 define void @vpstore_nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2i64:
@@ -168,11 +168,11 @@ define void @vpstore_nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2i64.p0nxv2i64(<vscale x 2 x i64> %val, <vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4i64.p0nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>*, <vscale x 4 x i1>, i32)
 
 define void @vpstore_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4i64:
@@ -180,11 +180,11 @@ define void @vpstore_nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4i64.p0nxv4i64(<vscale x 4 x i64> %val, <vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8i64.p0nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>*, <vscale x 8 x i1>, i32)
 
 define void @vpstore_nxv8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8i64:
@@ -192,11 +192,11 @@ define void @vpstore_nxv8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %ptr,
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8i64.p0nxv8i64(<vscale x 8 x i64> %val, <vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1f16.p0nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>*, <vscale x 1 x i1>, i32)
 
 define void @vpstore_nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1f16:
@@ -204,11 +204,11 @@ define void @vpstore_nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half>* %ptr
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1f16.p0nxv1f16(<vscale x 1 x half> %val, <vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2f16.p0nxv2f16(<vscale x 2 x half>, <vscale x 2 x half>*, <vscale x 2 x i1>, i32)
 
 define void @vpstore_nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2f16:
@@ -216,11 +216,11 @@ define void @vpstore_nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half>* %ptr
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2f16.p0nxv2f16(<vscale x 2 x half> %val, <vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4f16.p0nxv4f16(<vscale x 4 x half>, <vscale x 4 x half>*, <vscale x 4 x i1>, i32)
 
 define void @vpstore_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4f16:
@@ -228,11 +228,11 @@ define void @vpstore_nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half>* %ptr
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4f16.p0nxv4f16(<vscale x 4 x half> %val, <vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8f16.p0nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>*, <vscale x 8 x i1>, i32)
 
 define void @vpstore_nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8f16:
@@ -240,11 +240,11 @@ define void @vpstore_nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half>* %ptr
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vse16.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8f16.p0nxv8f16(<vscale x 8 x half> %val, <vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1f32.p0nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>*, <vscale x 1 x i1>, i32)
 
 define void @vpstore_nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1f32:
@@ -252,11 +252,11 @@ define void @vpstore_nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float>* %p
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1f32.p0nxv1f32(<vscale x 1 x float> %val, <vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2f32.p0nxv2f32(<vscale x 2 x float>, <vscale x 2 x float>*, <vscale x 2 x i1>, i32)
 
 define void @vpstore_nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2f32:
@@ -264,11 +264,11 @@ define void @vpstore_nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float>* %p
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2f32.p0nxv2f32(<vscale x 2 x float> %val, <vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4f32.p0nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>*, <vscale x 4 x i1>, i32)
 
 define void @vpstore_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4f32:
@@ -276,11 +276,11 @@ define void @vpstore_nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float>* %p
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4f32.p0nxv4f32(<vscale x 4 x float> %val, <vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8f32.p0nxv8f32(<vscale x 8 x float>, <vscale x 8 x float>*, <vscale x 8 x i1>, i32)
 
 define void @vpstore_nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8f32:
@@ -288,11 +288,11 @@ define void @vpstore_nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float>* %p
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vse32.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8f32.p0nxv8f32(<vscale x 8 x float> %val, <vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>*, <vscale x 1 x i1>, i32)
+declare void @llvm.vp.store.nxv1f64.p0nxv1f64(<vscale x 1 x double>, <vscale x 1 x double>*, <vscale x 1 x i1>, i32)
 
 define void @vpstore_nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv1f64:
@@ -300,11 +300,11 @@ define void @vpstore_nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double>*
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv1f64.p0nxv1f64(<vscale x 1 x double> %val, <vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>*, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv2f64.p0nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>*, <vscale x 2 x i1>, i32)
 
 define void @vpstore_nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv2f64:
@@ -312,11 +312,11 @@ define void @vpstore_nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double>*
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv2f64.p0nxv2f64(<vscale x 2 x double> %val, <vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>*, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv4f64.p0nxv4f64(<vscale x 4 x double>, <vscale x 4 x double>*, <vscale x 4 x i1>, i32)
 
 define void @vpstore_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv4f64:
@@ -324,11 +324,11 @@ define void @vpstore_nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double>*
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv4f64.p0nxv4f64(<vscale x 4 x double> %val, <vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
   ret void
 }
 
-declare void @llvm.vp.store.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>*, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv8f64.p0nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>*, <vscale x 8 x i1>, i32)
 
 define void @vpstore_nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vpstore_nxv8f64:
@@ -336,7 +336,7 @@ define void @vpstore_nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double>*
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vse64.v v8, (a0), v0.t
 ; CHECK-NEXT:    ret
-  call void @llvm.vp.store.nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
+  call void @llvm.vp.store.nxv8f64.p0nxv8f64(<vscale x 8 x double> %val, <vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
   ret void
 }
 
@@ -348,6 +348,6 @@ define void @vpstore_nxv1i8_allones_mask(<vscale x 1 x i8> %val, <vscale x 1 x i
 ; CHECK-NEXT:    ret
   %a = insertelement <vscale x 1 x i1> undef, i1 true, i32 0
   %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
-  call void @llvm.vp.store.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
+  call void @llvm.vp.store.nxv1i8.p0nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %b, i32 %evl)
   ret void
 }
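
For reference, the new suffixes follow LLVM's overloaded-intrinsic name mangling: the vector value type (e.g. nxv1i8) is followed by the pointer type, written p0 plus the pointee type for an address-space-0 pointer. A minimal sketch of the convention as exercised by the tests above (the copy_nxv1i8 function is a hypothetical illustration, not part of this commit):

  declare <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0nxv1i8(<vscale x 1 x i8>*, <vscale x 1 x i1>, i32)
  declare void @llvm.vp.store.nxv1i8.p0nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>*, <vscale x 1 x i1>, i32)

  ; Hypothetical example: load up to %evl lanes from %src under mask %m,
  ; then store them to %dst, using the fully mangled intrinsic names.
  define void @copy_nxv1i8(<vscale x 1 x i8>* %src, <vscale x 1 x i8>* %dst, <vscale x 1 x i1> %m, i32 zeroext %evl) {
    %v = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0nxv1i8(<vscale x 1 x i8>* %src, <vscale x 1 x i1> %m, i32 %evl)
    call void @llvm.vp.store.nxv1i8.p0nxv1i8(<vscale x 1 x i8> %v, <vscale x 1 x i8>* %dst, <vscale x 1 x i1> %m, i32 %evl)
    ret void
  }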
